diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml new file mode 100644 index 0000000..400abb7 --- /dev/null +++ b/.github/workflows/check-links.yml @@ -0,0 +1,32 @@ +name: check-links + +on: + # Manually trigger the workflow using + # gh workflow run check-links.yml # --ref + # gh run list --workflow check-links.yml # --branch + workflow_dispatch: + repository_dispatch: + schedule: + - cron: "00 18 * * 1" + +jobs: + linkChecker: + runs-on: ubuntu-latest + permissions: + issues: write # required for peter-evans/create-issue-from-file + steps: + - uses: actions/checkout@v5 + + - name: Link Checker + id: lychee + uses: lycheeverse/lychee-action@v2 + with: + fail: false + + - name: Create Issue From File + if: steps.lychee.outputs.exit_code != 0 + uses: peter-evans/create-issue-from-file@v5 + with: + title: Link Checker Report + content-filepath: ./lychee/out.md + labels: report, automated issue \ No newline at end of file diff --git a/.github/workflows/publish-test.yml b/.github/workflows/publish-test.yml new file mode 100644 index 0000000..f1bcaf2 --- /dev/null +++ b/.github/workflows/publish-test.yml @@ -0,0 +1,56 @@ +name: publish-test + +# This workflow is used to test the publish workflow without actually publishing to PyPI. + +# Differences with the publish workflow: +# - It is triggered manually instead of being triggered by the release workflow and/or tags. 
+# - It publishes to https://test.pypi.org/ instead of https://pypi.org/, +# which is a separate instance of the Python Package Index that allows trying distribution tools and processes without affecting the real index +# - It enables verbose: true in the pypa/gh-action-pypi-publish action to get more detailed logs for debugging +# - It uses POETRY_DYNAMIC_VERSIONING_BYPASS to bypass the dynamic versioning and use a static version that is accepted by Test PyPI, +# since Test PyPI (and PyPI) reject local versions (+hash) generated by poetry-dynamic-versioning in the absence of tags. +# +# Also keep in mind that OIDC authentication does not work with shared workflows, +# so the code essentially needs to be duplicated in the `publish.yml` and `publish-test.yml` workflows. + +on: + # Unlike the publish workflow, this test workflow is only triggered manually, using + # gh workflow run publish-test.yml --ref + # gh run list --workflow publish-test.yml + workflow_dispatch: + repository_dispatch: + +env: + MISE_EXPERIMENTAL: true + # Test PyPI (and PyPI) reject local versions (+hash) generated by poetry-dynamic-versioning in the absence of tags. + # So the simplest approach for testing is to override the version with a static one that is accepted by Test PyPI. 
+ POETRY_DYNAMIC_VERSIONING_BYPASS: "0.0.1.dev0" + +jobs: + ci: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v6 + - uses: jdx/mise-action@v3 + - run: mise run ci-build + pypi-publish: + name: Upload release to Test PyPI + runs-on: ubuntu-latest + environment: + name: testpypi + # cosmetic URL displayed in the GitHub UI for the environment, + # not to be confused with the repository-url used by the pypa/gh-action-pypi-publish action, + # which is the actual URL of the repository where the package is published + url: https://test.pypi.org/p/nbkp/ + permissions: + id-token: write # IMPORTANT: this permission is mandatory for trusted publishing + steps: + - uses: actions/checkout@v6 + - uses: jdx/mise-action@v3 + - run: mise run ci-build + - name: Publish package distributions to Test PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + verbose: true \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..9ca7b83 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,50 @@ +name: publish + +# Test this workflow by making similar changes to `publish-test.yml` and triggering it manually +# +# OIDC authentication does not work with shared workflows, +# so the code essentially needs to be duplicated in the `publish.yml` and `publish-test.yml` workflows. 
+ +on: + push: + tags: + - 'v*' + # Tags created by a workflow using GITHUB_TOKEN do not trigger other workflows + workflow_run: + workflows: ["release"] + types: + - completed + # The workflow is usually triggered when the release workflow completes, but it can also be re-triggered manually if needed using + # gh workflow run publish.yml --ref + # gh run list --workflow publish.yml + workflow_dispatch: + repository_dispatch: + +env: + MISE_EXPERIMENTAL: true + +jobs: + ci: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v6 + - uses: jdx/mise-action@v3 + - run: mise run ci-build + pypi-publish: + name: Upload release to PyPI + runs-on: ubuntu-latest + environment: + name: pypi + # cosmetic URL displayed in the GitHub UI for the environment, + # not to be confused with the repository-url used by the pypa/gh-action-pypi-publish action, + # which is the actual URL of the repository where the package is published + url: https://pypi.org/p/nbkp/ + permissions: + id-token: write # IMPORTANT: this permission is mandatory for trusted publishing + steps: + - uses: actions/checkout@v6 + - uses: jdx/mise-action@v3 + - run: mise run ci-build + - name: Publish package distributions to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..8a13577 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,31 @@ +name: release +on: + # Manually trigger the workflow using + # gh workflow run release.yml # --ref + # gh run list --workflow release.yml + workflow_dispatch: + repository_dispatch: + # push: + # branches: + # - main + +jobs: + bump-version: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + with: + # Fetches all history for all branches and tags (non-shallow clone) + # This is required for the mathieudutour/github-tag-action to determine the latest tag and bump it correctly + 
fetch-depth: 0 + - name: Bump version and push tag + id: tag-version + uses: mathieudutour/github-tag-action@v6.2 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + - name: Create a GitHub release + uses: ncipollo/release-action@v1 + with: + tag: ${{ steps.tag-version.outputs.new_tag }} + name: ${{ steps.tag-version.outputs.new_tag }} + body: ${{ steps.tag-version.outputs.changelog }} \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..8f38a6c --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,38 @@ +name: test + +on: + # Manually trigger the workflow using + # gh workflow run test.yml # --ref + # gh run list --workflow test.yml # --branch + workflow_dispatch: + repository_dispatch: + pull_request: + push: + tags: + - "*" + branches: + - "main" + +# concurrency: +# group: ${{ github.workflow }}-${{ github.ref }} +# cancel-in-progress: true + +concurrency: + # On main/release, we don't want any jobs cancelled so the sha is used to name the group + # On PR branches, we cancel the job if new commits are pushed + # Additionally, we want the workflow for each aggregate project to run independently + # More info: https://stackoverflow.com/a/68422069/253468 + group: ${{ github.workflow }}-${{ (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release' ) && format('{0}-main-{1}', github.workflow, github.sha) || format('{0}-main-{1}', github.workflow, github.ref) }} + cancel-in-progress: true + +env: + MISE_EXPERIMENTAL: true + +jobs: + test: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v6 + - uses: jdx/mise-action@v3 + - run: mise run ci-test \ No newline at end of file diff --git a/.tool-versions b/.tool-versions deleted file mode 100644 index 53ff29e..0000000 --- a/.tool-versions +++ /dev/null @@ -1 +0,0 @@ -python 3.14.3 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 
0000000..66b8507 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,23 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Architecture + +Detailed overview of the architecture, design patterns, and execution flow: @docs/architecture.md + +## Concepts + +Explanations of key concepts such as volumes, syncs, and the configuration model: @docs/concepts.md + +## Conventions + +Coding conventions, testing practices, and other guidelines for contributing to the codebase: @docs/conventions.md + +## Build & Test Commands + +Instructions on how to run unit and integration tests, as well as formatting and linting checks: @docs/building-and-testing.md + +## Releasing and Publishing + +Instructions on how to create new releases and publish the package to PyPI: @docs/releasing-and-publishing.md \ No newline at end of file diff --git a/Makefile b/Makefile deleted file mode 100644 index 40e8e27..0000000 --- a/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -.PHONY: install test format lint type-check clean build publish help - -help: ## Show this help message - @echo "Available commands:" - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' - -install: ## Install dependencies - poetry install - -test: ## Run tests - poetry run pytest tests/ -v - -format: ## Format code with black - poetry run black . - -lint: ## Run flake8 linting - poetry run flake8 ssb/ tests/ - -type-check: ## Run mypy type checking - poetry run mypy ssb/ - -check: format lint type-check test ## Run all checks (format, lint, type-check, test) - -clean: ## Clean up build artifacts - rm -rf build/ - rm -rf dist/ - rm -rf *.egg-info/ - find . -type d -name __pycache__ -delete - find . 
-type f -name "*.pyc" -delete - -build: ## Build the package - poetry build - -publish: ## Publish to PyPI (requires authentication) - poetry publish - -example: ## Run the basic usage example - poetry run python examples/basic_usage.py - -shell: ## Start a Poetry shell - poetry shell diff --git a/README.md b/README.md index 74c94b1..bf1344d 100644 --- a/README.md +++ b/README.md @@ -1,147 +1,160 @@ -# SSB (Simple Safe Backup) - -A secure backup solution for files and directories with optional encryption. - -## Features - -- **Simple Backup**: Easy-to-use backup functionality for files and directories -- **Encryption**: Optional AES encryption for secure backups -- **CLI Interface**: Command-line tool for quick backups and restores -- **Python API**: Programmatic access to backup functionality +# Nomad Backup (nbkp) + +![Stable Version](https://img.shields.io/pypi/v/nbkp?label=stable) +![Pre-release Version](https://img.shields.io/github/v/release/iglootools/nbkp?label=pre-release&include_prereleases&sort=semver) +![Python Versions](https://img.shields.io/pypi/pyversions/nbkp) +![Download Stats](https://img.shields.io/pypi/dm/nbkp) +![GitHub Stars](https://img.shields.io/github/stars/iglootools/nbkp) +![License](https://img.shields.io/github/license/iglootools/nbkp) +![CI Status](https://github.com/iglootools/nbkp/actions/workflows/test.yml/badge.svg?branch=main) + +An rsync-based backup tool for nomadic setups where sources and destinations aren't always available — laptops on the move, removable drives, +home servers behind changing networks. + +Sentinel files ensure backups only run when volumes are genuinely present, with optional btrfs or hard-link snapshots for point-in-time recovery. 
+ +## Main Use Cases + +The tool is primarily designed for the following backup scenarios: +- **Laptop to Server** — back up to your home server whenever you're on the home network +- **Laptop to External Drive** — back up to an external drive whenever it's connected +- **External Drive to Server** — replicate an external drive to your home server when both are available +- **Server to External Drive** — back up your home server to an external drive +- **Easy Setup** - pilot the backups from your laptop, minimal setup on the server (`rsync`, `btrfs`) + +It replaces the rsync shell scripts you'd normally maintain, adding: +- **Volume detection** (through sentinel files) — only runs when sources and destinations are actually available +- **Btrfs and hard-link snapshots** — keeps point-in-time copies so a bad sync can't wipe good backups +- **Declarative config** — one YAML file describes all your backup pairs +- **Structured output** — human-readable for convenience and JSON output for scripting and automation + +Full feature list: [docs/features.md](https://github.com/iglootools/nbkp/blob/main/docs/features.md). + +## Non-Goals + +nbkp is designed around a single orchestrator (typically a laptop) that initiates all syncs. +It intentionally does not support multi-server topologies where data flows directly between remote servers, for several reasons: + +- **SSH credentials are local.** Keys, proxy-jump chains, and connection options in the config describe how the orchestrator reaches each server — not how servers reach each other. Forwarding credentials between servers adds security risk and configuration complexity. +- **Checks and transfers take different paths.** Pre-flight checks (sentinel files, rsync availability, btrfs detection) run from the orchestrator to each server independently, but a server-to-server transfer would bypass the orchestrator entirely — meaning checks can pass while the actual sync fails. 
+- **Post-sync operations (snapshots, pruning) assume orchestrator access.** Btrfs and hard-link snapshot management connects from the orchestrator to the destination, not from the source server. + +If you need server-to-server replication: +- **Install nbkp on one of the servers** and configure separate syncs from there, treating that server as the orchestrator. +- **Use tools designed for multi-server topologies**: check the [Similar Tools](#similar-tools) section for options that support enterprise-y / multi-host setups. + +## Philosophy + +**Design Principles** +- Laptop-centric workflows +- Changing networks +- Drives being plugged/unplugged +- Backups happening when possible +- Not always-on infrastructure +- Personal homelab / Raspberry Pi setups + +**Implementation Principles** + +No custom storage format, protocol, or encryption — just proven tools composed together: +- **rsync + SSH** — handles the actual file transfer, locally or remotely +- **Plain directories** — files are stored as-is; restoring is just a copy +- **Btrfs snapshots (optional)** — space-efficient point-in-time copies via copy-on-write, with automatic pruning. Each snapshot is a read-only subvolume exposing a plain directory tree +- **Hard-link snapshots (optional)** — alternative to btrfs snapshots, works on any filesystem that supports hard links, but less efficient and more fragile +- **cryptsetup (optional)** — full-volume encryption for backup destinations + +**Nomad backup metaphor** + +A nomad: +- Moves between places +- Sets up temporary camp +- Carries essential belongings +- Adapts to environment +- Relies on what is present + +Which maps to: +- Laptop +- External drive +- Home server +- Network availability +- Mount detection ## Installation -### Using Poetry (Recommended) - -**Requirements:** -- Python 3.13 or higher -- Poetry - -1. Clone the repository: - ```bash - git clone - cd ssbng - ``` - -2. Install dependencies and the package: - ```bash - poetry install - ``` - -3. 
Activate the virtual environment: - ```bash - poetry shell - ``` - -### Using pip - -```bash -pip install . -``` +See [docs/installation.md](https://github.com/iglootools/nbkp/blob/main/docs/installation.md). ## Usage -### Command Line Interface - -The `ssb` command provides a simple interface for backup operations: - -#### Create a backup: -```bash -# Basic backup -ssb backup /path/to/source /path/to/backup/dir - -# Encrypted backup -ssb backup /path/to/source /path/to/backup/dir --encrypt - -# Named backup -ssb backup /path/to/source /path/to/backup/dir --name my_backup - -# With password -ssb backup /path/to/source /path/to/backup/dir --encrypt --password mypassword -``` +See [docs/usage.md](https://github.com/iglootools/nbkp/blob/main/docs/usage.md). -#### List available backups: -```bash -ssb list-backups /path/to/backup/dir -``` +## Concepts -#### Restore a backup: -```bash -ssb restore backup_name /path/to/backup/dir /path/to/restore/dir -``` +See [docs/concepts.md](https://github.com/iglootools/nbkp/blob/main/docs/concepts.md). 
-#### Get help: -```bash -ssb --help -ssb backup --help -ssb restore --help -ssb list-backups --help -``` +## Contribute +- [docs/architecture.md](https://github.com/iglootools/nbkp/blob/main/docs/architecture.md) - architecture overview +- [docs/conventions.md](https://github.com/iglootools/nbkp/blob/main/docs/conventions.md) — coding conventions and guidelines +- [docs/setup-development-environment.md](https://github.com/iglootools/nbkp/blob/main/docs/setup-development-environment.md) — development setup +- [docs/building-and-testing.md](https://github.com/iglootools/nbkp/blob/main/docs/building-and-testing.md) — running tests and checks +- [docs/releasing-and-publishing.md](https://github.com/iglootools/nbkp/blob/main/docs/releasing-and-publishing.md) — releases and PyPI publishing -### Python API +## Resources +- [Releases](https://pypi.org/project/nbkp/#history) +- [Issue Tracker](https://github.com/iglootools/nbkp/issues) -```python -from ssb import BackupManager, EncryptionManager +## Related Projects -# Create a backup manager -backup_manager = BackupManager("/path/to/backup/dir") +### Dependencies +- [rsync](https://rsync.samba.org/) — the underlying file synchronization tool +- [btrfs](https://btrfs.wiki.kernel.org/index.php/Main_Page) — for space-efficient point-in-time copies via copy-on-write +- [cryptsetup](https://gitlab.com/cryptsetup/cryptsetup) — for full-volume encryption +- [typer](https://typer.tiangolo.com/) — for building the CLI interface +- [pydantic](https://pydantic.dev/) — for data modeling and validation -# Create a simple backup -backup_path = backup_manager.create_backup("/path/to/source") +### Similar Tools -# Create an encrypted backup -encryption_manager = EncryptionManager.from_password("my_password") -backup_manager = BackupManager("/path/to/backup/dir", encryption_manager) -backup_path = backup_manager.create_backup("/path/to/source") +There are a number of open source backup tools that use rsync, btrfs, or similar principles. 
This section describes how `nbkp` compares to some of the popular ones. +If you believe that the representation is inaccurate or if there are other tools that should be included in this list, please submit an issue or PR to update this section. -# List backups -backups = backup_manager.list_backups() +#### Rsync-based -# Restore a backup -restored_path = backup_manager.restore_backup("backup_name", "/path/to/restore") -``` +- **[rsnapshot](https://rsnapshot.org/)** — periodic snapshots via rsync + hard links (hourly/daily/weekly/monthly). Designed for server/cron use with no awareness of removable or intermittent targets. Files stored as plain directories. +- **[Back In Time](https://github.com/bit-team/backintime)** — GUI/CLI tool using rsync + hard links with scheduling and encfs encryption. Provides a Qt interface; uses hard links instead of btrfs snapshots; no sentinel-file mechanism for removable drives. +- **[rsync-time-backup](https://github.com/laurent22/rsync-time-backup)** — Time Machine-style shell script using rsync `--link-dest`. Single script, no config file; uses hard links instead of btrfs snapshots; no volume detection. +- **[rdiff-backup](https://rdiff-backup.net/)** — keeps the latest backup as a plain mirror, stores reverse diffs for older versions. Older versions require the tool to reconstruct; no removable-drive awareness. +- **[Dirvish](https://dirvish.org/)** — rotating network backup system using rsync + hard links. Oriented toward server-pull workflows; no removable-drive detection or btrfs support. -## Development +#### Deduplicating -### Setup Development Environment +- **[BorgBackup](https://www.borgbackup.org/)** — chunk-level deduplication with compression and authenticated encryption. Proprietary repository format (not plain directories); requires `borg` on the remote side; no removable-drive detection. 
+- **[Restic](https://restic.net/)** — content-addressable backups with encryption by default, supporting many backends (local, S3, SFTP, B2). Proprietary format; restoring requires the restic tool; no volume detection. +- **[Duplicity](https://duplicity.us/)** — GPG-encrypted tar volumes with librsync incremental transfers. Not browsable as plain directories; full+incremental chain model; no btrfs integration. +- **[Kopia](https://kopia.io/)** — content-addressable storage with encryption, compression, and both CLI/GUI. Proprietary format; includes an optional scheduling server; no removable-drive or btrfs support. -**Requirements:** -- Python 3.13 or higher +#### Btrfs / snapshot-focused -1. Install Poetry if you haven't already: - ```bash - curl -sSL https://install.python-poetry.org | python3 - - ``` +- **[btrbk](https://github.com/digint/btrbk)** — btrfs-native snapshot management with send/receive for remote transfers. Btrfs-only (no rsync); more sophisticated retention policies (hourly/daily/weekly/monthly); no non-btrfs filesystem support. +- **[Snapper](http://snapper.io/)** — automated btrfs snapshot creation with timeline-based retention and rollback. Local snapshot management only; no rsync or remote transfer; no external backup targets. +- **[Timeshift](https://github.com/linuxmint/timeshift)** — system restore via rsync + hard links or btrfs snapshots. Targets root filesystem for system-level rollback; excludes user data by default; no remote backup. -2. Install dependencies: - ```bash - poetry install - ``` +#### Continuous / real-time -3. Activate the virtual environment: - ```bash - poetry shell - ``` +- **[Syncthing](https://syncthing.net/)** — continuous peer-to-peer file synchronization across devices. Decentralized (no central server); syncs bidirectionally in real time; no snapshots or point-in-time recovery; designed for keeping folders in sync rather than creating backups. 
+- **[Lsyncd](https://lsyncd.github.io/lsyncd/)** — monitors directories via inotify and triggers rsync (or other tools) on changes. Daemon-based, continuous replication; designed for server-to-server mirroring; no snapshot management or removable-drive awareness. -### Running Tests +#### Cloud / multi-backend -```bash -pytest -``` +- **[Rclone](https://rclone.org/)** — syncs files to and between 70+ cloud and remote backends (S3, SFTP, Google Drive, etc.). Can transfer server-to-server directly; not rsync-based; no btrfs integration or volume detection. -### Code Formatting +#### Bidirectional sync -```bash -black . -``` +- **[Unison](https://github.com/bcpierce00/unison)** — bidirectional file synchronization between two hosts. Detects conflicts; requires Unison on both sides with matching versions; no snapshots or removable-drive awareness. -### Type Checking +#### Enterprise / multi-host -```bash -mypy ssbng/ -``` +- **[Bacula](https://www.bacula.org/) / [Bareos](https://www.bareos.com/)** — enterprise client-server backup with a director, storage daemons, and file daemons across multiple hosts. Full multi-server topology; proprietary catalog and storage format; significant setup complexity. +- **[Amanda](https://www.amanda.org/)** — network backup orchestrating multiple clients from a central server. Designed for tape and disk pools; uses native dump/tar; heavier infrastructure than nbkp targets. ## License -This project is licensed under the MIT License - see the LICENSE file for details. +This project is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) - see the LICENSE file for details. diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000..1d8a662 --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,38 @@ +# Architecture + +NBKP is an rsync-based backup tool. 
The execution flow is: + +``` +CLI (cli.py) → Runner (runner.py) → Check (check.py) + Rsync (rsync.py) + Btrfs (btrfs.py) + Hardlinks (hardlinks.py) + ↓ ↓ ↓ ↓ + SSH (ssh.py) SSH (ssh.py) SSH (ssh.py) SSH (ssh.py) + +``` + +All modules resolve volumes from `Config.volumes[name]` and dispatch on volume type. + +## Key dispatch pattern + +`LocalVolume | RemoteVolume` (the `Volume` union type) is used throughout. Every module that touches the filesystem branches on `isinstance(vol, RemoteVolume)` — local operations use `pathlib`/`subprocess` directly, remote operations go through `ssh.run_remote_command()`. + + +## Sync flow (runner.py) + +1. `check_all_syncs()` — verifies volumes are reachable and sentinel files exist (`.nbkp-vol`, `.nbkp-src`, `.nbkp-dst`) +2. For each active sync, dispatch on `snapshot_mode`: + - **`none`**: `run_rsync()` → done + - **`btrfs`**: `run_rsync()` to `{destination}/latest/` → `create_snapshot()` → optional `prune_snapshots()` + - **`hard-link`**: cleanup orphans → resolve `--link-dest` from previous snapshot → `create_snapshot_dir()` → `run_rsync()` to `{destination}/snapshots/{timestamp}/` → `update_latest_symlink()` → optional `prune_snapshots()` +3. Btrfs syncs write to `{destination}/latest/`; hard-link syncs write directly to `{destination}/snapshots/{ISO8601Z}/`. Both store snapshots under `{destination}/snapshots/`. + +## Rsync command variants (rsync.py) + +- **Local→Local**: direct rsync +- **Local→Remote / Remote→Local**: rsync with `-e "ssh -p PORT -i KEY -o OPT"` +- **Remote→Remote (same server)**: SSH into the server once, run rsync with local paths + +Cross-server remote-to-remote syncs (different SSH endpoints) are not supported. Use two separate syncs through the local machine instead. + +## Config resolution (config.py) + +Search order: explicit path → `$XDG_CONFIG_HOME/nbkp/config.yaml` → `/etc/nbkp/config.yaml`. Raises `ConfigError` on validation failure. 
diff --git a/docs/building-and-testing.md b/docs/building-and-testing.md new file mode 100644 index 0000000..794baa0 --- /dev/null +++ b/docs/building-and-testing.md @@ -0,0 +1,40 @@ +# Building and Testing + +The unit tests cover the core logic of the tool, while the integration tests exercise the real rsync/SSH/btrfs pipeline against a Docker container. + +Unit tests run without external dependencies; integration tests require a running Docker daemon. + +Additionally, `poetry run nbkp-test` provides helpers for manual testing/QA. + +Run automated tests and checks (no external dependencies): +```bash +# mise tasks +mise run check # Run all checks: format + lint + type-check + unit tests +mise run test # Unit tests only (no Docker) +mise run test-integration # Integration tests only (requires Docker) +mise run test-all # Unit + integration tests +mise run format # black +mise run lint # flake8 +mise run type-check # mypy (strict: disallow_untyped_defs) + +# Using Poetry syntax directly +poetry run pytest tests/ --ignore=tests/integration/ -v # Unit tests only (no Docker) +poetry run pytest tests/integration/ -v # Integration tests only (requires Docker) +poetry run pytest tests/ -v # Unit + integration tests +poetry run black . # formatting +poetry run flake8 nbkp/ tests/ # linting +poetry run mypy nbkp/ tests/ # type-checking +poetry run pytest tests/test_ssh.py::TestBuildSshBaseArgs::test_full -v # run a single test +``` + +The integration test suite uses [testcontainers](https://testcontainers-python.readthedocs.io/) and automatically: +- Generates an ephemeral SSH key pair +- Builds and starts a Docker container with SSH, rsync, and a btrfs filesystem +- Runs tests covering local-to-local, local-to-remote, remote-to-local syncs, btrfs snapshots, and status checks +- Tears down the container on completion + +The `check-links` workflow runs a link checker against the documentation to catch broken links. 
+It is scheduled to run weekly, but can also be triggered manually using `gh workflow run check-links.yml`. + +## GitHub Config +- The `main` branch is protected against force pushes. \ No newline at end of file diff --git a/docs/concepts.md b/docs/concepts.md new file mode 100644 index 0000000..0f10edd --- /dev/null +++ b/docs/concepts.md @@ -0,0 +1,403 @@ +# Concepts + +## Backup Config + +Expressed in YAML. Sourced from the regular locations (e.g. `/etc/nbkp/config.yaml`, `~/.config/nbkp/config.yaml`, etc) and can also be provided as an argument when calling the backup tool. + +### Sync + +A sync describes a source and destination pair with the relevant config (type of the backup, source and destination paths, server, etc). +Both the source and destination can be local or remote, and can be on removable drives. +The only supported backup type for now is rsync, but other backup types will be added in the future (e.g. git, etc). + +For a sync to be considered active, both the source and the destination must provide a `.nbkp-src` and `.nbkp-dst` file respectively. +For remote sources/destinations, the server must be reachable for the corresponding sync to be active. + +This is to ensure that when using removable drives, both the source and destinations are currently mounted / available to prevent data loss +or backups to the wrong drives. + +For the rsync backup type, the source and the destination can either be an rsync local or an rsync remote volume, and can specify a subdirectory on the volume. + +Individual syncs can be enabled or disabled when calling the backup tool. + +A sync can optionally enable btrfs snapshots, which will be used to perform incremental backups. +This is only supported for local sources and destinations that are on btrfs volumes. + +The latest backup will be stored under ${destination}/latest and snapshots (if enabled and supported) will be stored under ${destination}/snapshots/${iso8601_timestamp}. 
+When enabled, a new btrfs snapshot is created each time the backup completes. + +The `max-snapshots` field controls the maximum number of snapshots to keep. When set, old snapshots are automatically pruned after each `run`. The `prune` command can also be used to manually prune snapshots. + +```yaml +destination: + volume: usb-drive + btrfs-snapshots: + enabled: true + max-snapshots: 10 # optional, omit for unlimited +``` + +### Hard-Link Snapshots + +A sync can optionally enable hard-link-based snapshots as an alternative to btrfs snapshots. This works on any filesystem that supports hard links (ext4, xfs, btrfs, etc.) but not on FAT/exFAT. + +Unlike btrfs snapshots (which sync to `${destination}/latest/` then snapshot it), hard-link snapshots sync **directly into a new snapshot directory**: + +1. Create `${destination}/snapshots/${timestamp}/` +2. rsync into that directory with `--link-dest=../${previous-snapshot}` (unchanged files are hard-linked, saving disk space) +3. On success: update symlink `${destination}/latest` → `snapshots/${timestamp}` +4. Prune old snapshots with `rm -rf` (no btrfs commands needed) + +**Safety:** The `latest` symlink is only updated after a successful sync. If a sync fails midway, `latest` still points to the previous complete snapshot. Orphaned snapshot directories (from failed syncs) are detected and cleaned up before the next sync. Pruning never removes the snapshot that `latest` points to. + +Only one of `btrfs-snapshots` and `hard-link-snapshots` can be enabled per sync (they are mutually exclusive). + +```yaml +destination: + volume: usb-drive + hard-link-snapshots: + enabled: true + max-snapshots: 10 # optional, omit for unlimited +``` + +### Rsync Local Volume + +A reusable configuration for a local source or destination that can be shared between multiple syncs. + +To be considered active, a local volume must have a `.nbkp-vol` file in the root of the volume. 
+ +### SSH Endpoint + +A reusable configuration for an SSH server that can be shared between multiple remote volumes. +Provides the host, port, user, key, structured connection options, and optional proxy-jump. + +The `proxy-jump` field references another ssh-endpoint by slug, enabling connections through a bastion/jump host. For multi-hop chains, use `proxy-jumps` (a list of endpoint slugs); the two fields are mutually exclusive. Both map to SSH's `-J` flag (comma-separated) and Fabric's nested `gateway` parameter. Circular proxy-jump chains are detected and rejected at config load time. + +The `location` field declares which network location this endpoint is accessible from (e.g. `home`, `office`, `travel`). Used with the `--location` CLI option for endpoint selection (see [Endpoint Filtering](#endpoint-filtering)). + +The `extends` field references another ssh-endpoint by slug, inheriting all its fields. The child endpoint can override any inherited field. Circular extends chains are detected and rejected at config load time. + +```yaml +ssh-endpoints: + bastion: + host: bastion.example.com + user: admin + + nas: + host: nas.internal + user: backup + proxy-jump: bastion + location: home + + # Inherits user, key, port from nas; overrides host and location + nas-public: + extends: nas + host: nas.public.example.com + location: travel +``` + +The `connection-options` field is an optional dictionary of typed SSH connection settings. These map to parameters across SSH (`ssh(1) -o`), Paramiko (`SSHClient.connect()`), and Fabric (`Connection()`). 
Available options: + +| Field | Default | Description | +|---|---|---| +| `connect-timeout` | `10` | SSH connection timeout in seconds | +| `compress` | `false` | Enable SSH compression | +| `server-alive-interval` | `null` | Keepalive interval in seconds (prevents connection drops) | +| `allow-agent` | `true` | Use SSH agent for key lookup | +| `look-for-keys` | `true` | Search `~/.ssh/` for keys | +| `banner-timeout` | `null` | Wait time for SSH banner | +| `auth-timeout` | `null` | Wait time for auth response | +| `channel-timeout` | `null` | Wait time for channel open (Paramiko/Fabric only) | +| `strict-host-key-checking` | `true` | Verify remote host key | +| `known-hosts-file` | `null` | Custom known hosts file path | +| `forward-agent` | `false` | Enable SSH agent forwarding | +| `disabled-algorithms` | `null` | Disable specific SSH algorithms (Paramiko/Fabric only) | + +Note: `channel-timeout` and `disabled-algorithms` are only used by the Fabric/Paramiko connection path (status checks, btrfs operations). They have no SSH CLI equivalent and do not affect rsync's `-e` option. + +#### Disabling host key verification + +Setting `known-hosts-file: /dev/null` translates to the SSH option `-o UserKnownHostsFile=/dev/null`. SSH normally records and verifies host keys in `~/.ssh/known_hosts`; pointing it to `/dev/null` means every connection starts with an empty known-hosts database. + +Combined with `strict-host-key-checking: false`, SSH will never reject a host based on its key and never persist any host key it sees. This is commonly used for ephemeral or internal hosts (e.g. a NAS behind a bastion) whose keys may change after reprovisioning, where TOFU (trust-on-first-use) verification isn't practical. + +Without `known-hosts-file: /dev/null`, setting only `strict-host-key-checking: false` would still write new host keys to `~/.ssh/known_hosts`, which could later cause "host key changed" warnings if the key rotates and strict checking is re-enabled. 
+ +```yaml +ssh-endpoints: + nas: + host: nas.internal + proxy-jump: bastion + connection-options: + strict-host-key-checking: false + known-hosts-file: /dev/null +``` + +### Rsync Remote Volume + +A reusable configuration for a remote source or destination that can be shared between multiple syncs. +References an SSH endpoint by name and provides the path to the remote volume. + +A remote volume must declare a primary endpoint via `ssh-endpoint`. It can optionally declare additional endpoints via `ssh-endpoints` (a list of endpoint slugs). When multiple endpoints are declared, the tool selects the best one based on endpoint filtering options (see [Endpoint Filtering](#endpoint-filtering)). + +```yaml +volumes: + nas-backup: + type: remote + ssh-endpoint: nas # primary (required) + ssh-endpoints: # optional, additional candidates + - nas + - nas-public + path: /volume1/backups +``` + +To be considered active, a remote volume must have a `.nbkp-vol` file in the root of the volume, and the selected endpoint must be reachable. + +### Rsync Options + +By default, every sync uses the following rsync flags: `-a --delete --delete-excluded --partial-dir=.rsync-partial --safe-links --checksum`. The `rsync-options` section lets you customise flags per sync: + +```yaml +syncs: + my-sync: + rsync-options: + compress: true # default false — adds --compress + checksum: false # default true — adds --checksum + default-options-override: # replaces the default flags entirely + - "-a" + - "--delete" + extra-options: # appends additional flags + - "--progress" +``` + +**`compress`** — enables rsync `--compress` for transfer compression. Useful for remote syncs over slow links. Default: `false`. + +**`checksum`** — enables rsync `--checksum` to compare files by checksum instead of mod-time and size. More reliable but slower. Default: `true`. + +**`default-options-override`** — replaces the default flags entirely. When omitted, the defaults are used unchanged. 
+ +**`extra-options`** — appends additional flags after the defaults (or after `default-options-override` when set). + +When `rsync-options` is omitted entirely, the defaults are used with `checksum: true` and `compress: false`. + +### Filters + +A sync can optionally define rsync filters to control which files are included or excluded during the backup. There are three complementary mechanisms: + +**Structured rules** — `include` / `exclude` dictionaries that are normalized into rsync filter syntax: + +```yaml +filters: + - include: "*.jpg" # becomes "+ *.jpg" + - exclude: "*.tmp" # becomes "- *.tmp" +``` + +**Raw rsync filter strings** — passed directly to rsync's `--filter` option, supporting the full rsync filter syntax: + +```yaml +filters: + - "H .git" # hide .git + - "- __pycache__/" # exclude __pycache__ +``` + +Structured and raw filters can be mixed freely in the same list. They are applied in order as `--filter=RULE` arguments. + +**External filter file** — a path to a file containing rsync filter rules in native rsync syntax, applied via `--filter=merge FILE`: + +```yaml +filter-file: ~/.config/nbkp/filters/photos.rules +``` + +When both inline `filters` and `filter-file` are present, inline filters are applied first, followed by the filter file. + +### Endpoint Filtering + +When a remote volume declares multiple endpoints, the tool selects the best one at runtime. The following CLI options control endpoint selection (available on `check`, `run`, `sh`, `troubleshoot`, and `prune`): + +| Option | Description | +|---|---| +| `--location SLUG` / `-l SLUG` | Prefer endpoints whose `location` field matches the given slug | +| `--private` | Prefer endpoints whose host resolves to private (LAN) IP addresses | +| `--public` | Prefer endpoints whose host resolves to public (WAN) IP addresses | + +Selection logic: +1. Gather candidate endpoints from the volume's `ssh-endpoints` list (or the primary `ssh-endpoint` if no list is declared) +2. 
Exclude endpoints whose host cannot be DNS-resolved (unreachable) +3. If `--location` is set, prefer endpoints with matching `location` field +4. If `--private` or `--public` is set, prefer endpoints with matching network type +5. If no candidates remain after filtering, fall back to the primary endpoint + +### Example Config + +```yaml +ssh-endpoints: + # Bastion/jump host for reaching internal servers + bastion: + host: bastion.example.com + user: admin + connection-options: + server-alive-interval: 60 # keepalive every 60s + + # SSH connection details for the NAS (via bastion) + nas: + host: nas.internal + port: 5022 # optional, defaults to 22 + user: backup # optional + key: ~/.ssh/nas_ed25519 # optional + proxy-jump: bastion # connect through bastion + location: home # accessible from home network + connection-options: # optional, all fields have defaults + connect-timeout: 30 + strict-host-key-checking: false + known-hosts-file: /dev/null + compress: true + disabled-algorithms: # Paramiko/Fabric only + ciphers: + - aes128-cbc + + # Public endpoint for the same NAS (inherits from nas) + nas-public: + extends: nas # inherits user, key, port, connection-options + host: nas.public.example.com + location: travel # accessible when traveling + +volumes: + # Local volume on a removable drive + laptop: + type: local + path: /mnt/data + + # Local volume on a btrfs filesystem + usb-drive: + type: local + path: /mnt/usb-backup + + # Remote volume with multiple endpoints for location-awareness + nas-backups: + type: remote + ssh-endpoint: nas # primary endpoint (required) + ssh-endpoints: # candidate endpoints for auto-selection + - nas + - nas-public + path: /volume1/backups + + nas-photos: + type: remote + ssh-endpoint: nas + ssh-endpoints: + - nas + - nas-public + path: /volume2/photos + +syncs: + # Local-to-remote sync with filters + photos-to-nas: + source: + volume: laptop + subdir: photos # optional subdirectory on the volume + destination: + volume: nas-photos + 
subdir: photos-backup + enabled: true # optional, defaults to true + filters: # optional rsync filters + - include: "*.jpg" # structured include rule + - include: "*.png" + - exclude: "*.tmp" # structured exclude rule + - "H .git" # raw rsync filter string + filter-file: ~/.config/nbkp/filters/photos.rules # optional + + # Local-to-local sync with btrfs snapshots + documents-to-usb: + source: + volume: laptop + subdir: documents + destination: + volume: usb-drive + btrfs-snapshots: + enabled: true + max-snapshots: 10 # optional, omit for unlimited + + # Local-to-local sync with hard-link snapshots + music-to-usb: + source: + volume: laptop + subdir: music + destination: + volume: usb-drive + hard-link-snapshots: + enabled: true + max-snapshots: 5 + + # Sync with custom rsync options + music-to-nas: + source: + volume: laptop + subdir: music + destination: + volume: nas-backups + subdir: music-backup + rsync-options: + compress: true # enable --compress for remote sync +``` + +**Location-aware usage:** + +```bash +# At home — prefer private LAN endpoints +nbkp run --config backup.yaml --location home +nbkp run --config backup.yaml --private + +# Traveling — prefer public endpoints +nbkp run --config backup.yaml --location travel +nbkp run --config backup.yaml --public +``` + +## Shell Script Generation (`sh` command) + +The `nbkp sh` command compiles a config into a standalone bash script that performs the same backup operations as `nbkp run`, without requiring Python or the config file at runtime. All paths, SSH options, and rsync arguments are baked into the generated script. 
+ +```bash +# Generate and inspect the script +nbkp sh --config backup.yaml + +# Generate, save to file (made executable), and validate syntax +nbkp sh --config backup.yaml -o backup.sh +bash -n backup.sh # syntax check + +# Run the generated script with flags +./backup.sh --dry-run +./backup.sh -v # verbose +./backup.sh -v -v # more verbose +``` + +The generated script supports `--dry-run` (`-n`) and `--verbose` (`-v`, `-vv`, `-vvv`) as runtime arguments — these are not baked in at generation time. + +### Relative paths + +The `--relative-src` and `--relative-dst` flags make local source and/or destination paths relative to the script location. These flags require `--output-file` so the script knows where it lives. Remote volume paths are always absolute (they live on remote hosts). + +```bash +# Store the script next to the backups — destination paths become relative +nbkp sh --config backup.yaml -o /mnt/backups/backup.sh --relative-dst + +# Both source and destination relative +nbkp sh --config backup.yaml -o /mnt/data/backup.sh --relative-src --relative-dst +``` + +The generated script resolves its own directory at runtime via `NBKP_SCRIPT_DIR` and uses it to compute the actual paths. This makes the script portable — it works regardless of where the drive is mounted. 
+ +**What is preserved from `nbkp run`:** +- All rsync command variants (local-to-local, local-to-remote, remote-to-local, remote-to-remote same server) +- SSH options (port, key, `-o` options, proxy jump `-J`) +- Rsync filters and filter-file support +- Btrfs snapshot creation and pruning +- Hard-link snapshots: incremental backups via `--link-dest`, symlink management, and pruning +- Pre-flight checks (volume sentinels, endpoint sentinels) +- Nonzero exit on any sync failure + +**What is dropped:** +- Rich console output (spinners, progress bars) — replaced with simple log messages +- JSON output mode +- Python runtime / config parsing — all values are hardcoded +- Paramiko-only SSH options (`channel_timeout`, `disabled_algorithms`) — no `ssh` CLI equivalent + +Disabled syncs appear in the generated script as commented-out blocks, allowing users to re-enable them by uncommenting. diff --git a/docs/conventions.md b/docs/conventions.md new file mode 100644 index 0000000..2ec3476 --- /dev/null +++ b/docs/conventions.md @@ -0,0 +1,63 @@ +# Conventions + +## General Coding Conventions +- **Functional Style**: + - Prefer functional programming style over procedural style. Use pure functions and avoid side effects when possible. +- **Charsets**: + - UTF-8 everywhere. +- **Time Management** + - UTC for all timestamps + - Do not generate the current timestamps directly inside the core logic: pass the timestamps from the higher-level functions, tests, and other entry points. +- **Mocks** + - Avoid use of mocks when the values can be passed as a parameter (e.g. time) +- **Console Output** + - Do not hardcode indents in strings, compute the indent at the call site +- **Version Management** + - Pin specific versions of all dependencies or use a lock file (e.g. poetry.lock) to ensure reproducible builds and avoid issues with breaking changes in dependencies. + + ```bash + # examples + mise use --pin pipx:poetry + ``` +- **Github Workflows** + - Whenever safe (i.e. 
not affecting production), enable `workflow_dispatch` and `repository_dispatch` to allow manual triggering of workflows from the GitHub UI or CLI, which is useful for testing and debugging. + - Use OpenID Connect (OIDC) authentication for publishing to PyPI, and set up a separate workflow for testing releases to Test PyPI. This allows testing the release and publish process without affecting the real PyPI index, and provides more detailed logs for debugging. +- **Command Line** + - When calling external commands, build the command lines as lists of arguments instead of strings to avoid issues with quoting and escaping. +- **Testability** + - Expose exceptions/errors as structured data classes and perform the assertions on the structured output in tests instead of matching against raw error message strings. This allows for more robust tests that are not brittle to changes in error message formatting. + +## General Python Coding Conventions +- **Typing**: Use type annotations for all functions and methods, including return types. Use `mypy` for static type checking. +- **Data Classes**: + - All serialized model objects are frozen pydantic dataclasses, immutable once created. + - Other data classes should also be frozen. +- **Formatting**: + - 79 characters (black + flake8). +- **Python Version**: 3.14 (mypy target and black target). +- **Control Flow** + - Prefer match-case over if-elif-else chains + - Prefer comprehensions and built-ins (map, filter) over manual loops when appropriate. + - Avoid `continue` in loops, and prefer filtering with comprehensions or built-ins instead. + - Prefer explicit if/else syntax over implicit else + +## Application-Specific Coding Conventions +- **Naming Conventions** + - `kebab-case` for CLI commands and config keys +- **CLI** + - Use `typer` for CLI implementation (argument parsing, formatting, etc.) + - Provide both human-readable and JSON output formats for all commands, with human-readable as the default. 
+ - Provide ability to pass a config file to all commands + - Provide a dry-run parameter for all data-mutating or long-running operations +- **Testing** + - No real rsync/ssh/btrfs calls in unit tests - use mocks instead. Docker-enabled integration tests cover the real interactions. + - Generate YAML test data using the Pydantic data models and `model.model_dump()` instead of hardcoding YAML strings. + This ensures the test data is always valid and consistent with the models. +- **Domain Logic Consistency** + - When making changes to the config schema/models or status checks, make sure to update: + - The `testcli` CLI app to generate new test data that reflects the changes, and update the expected outputs in `testdata.py` if necessary. + - The `cli` CLI app to support the new functionality, and update the formatting logic in `outputs.py` if necessary. + - `sh` command: + - Ensure to add comments in the codebase to describe which choices have been made with regard to which of the original (`run`) functionality has been preserved vs dropped + - When adding functionality to the `run` command, make sure to also add it to the `sh` command, or explicitly document why it's not applicable. + - When adding a dependency on an external tool (e.g. `stat`, `findfmt`), add a check for the tool in the CLI app and provide a clear error message if it's not found. 
diff --git a/docs/features.md b/docs/features.md new file mode 100644 index 0000000..76c9d52 --- /dev/null +++ b/docs/features.md @@ -0,0 +1,18 @@ +# Features + +TODO + +- compute dependencies between syncs when sync endpoints are shared between writers and readers + +## Commands + +- run (with support for dry run) +- status (list the active syncs and volumes) + +TODO + +## Outputs + +All commands provide the following outputs: +- Human-readable logs (default) +- JSON \ No newline at end of file diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..1d0fa86 --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,42 @@ +# Installation + +## Install with pipx + +[pipx](https://pipx.pypa.io/) installs CLI tools in isolated environments, keeping your system Python clean: + +```bash +pipx install nbkp +``` + +To upgrade to the latest version: + +```bash +pipx upgrade nbkp +``` + +## Shell Completion + +nbkp supports tab completion for Bash, Zsh, Fish, and PowerShell. + +Install completion for your current shell: + +```bash +nbkp --install-completion +``` + +Or target a specific shell: + +```bash +nbkp --install-completion bash +nbkp --install-completion zsh +nbkp --install-completion fish +nbkp --install-completion powershell +``` + +To preview the completion script without installing it: + +```bash +nbkp --show-completion +``` + +Restart your shell (or source the relevant config file) for completions to take effect. diff --git a/docs/releasing-and-publishing.md b/docs/releasing-and-publishing.md new file mode 100644 index 0000000..1dc3bb2 --- /dev/null +++ b/docs/releasing-and-publishing.md @@ -0,0 +1,22 @@ +# Releasing and Publishing + +The build uses the [poetry-dynamic-versioning](https://github.com/mtkennerly/poetry-dynamic-versioning) plugin +to automatically set the version based on git tags. + +The following GitHub workflows are set up to automate the release and publishing process: +1. 
The `release` workflow takes care of pushing a tag based on conventional commits and creating the Github release. + - This workflow uses the [github-tag](https://github.com/marketplace/actions/github-tag) action + - It is triggered manually using `gh workflow run release.yml` +2. The `publish` workflow takes care of publishing the package to PyPI + - This workflow uses the [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) action + - It is triggerred automatically when the `release` workflow completes, + - But it can also be re-triggered manually if needed using `gh workflow run publish.yml --ref ` + +## PyPI Config + +[PyPI](https://pypi.org/) and [Test PyPI](https://test.pypi.org/) have been configured to allow the `nbkp` project to be published using OpenID Connect (OIDC) authentication: +- Github project name: `iglootools/nbkp` +- Workflow: `publish.yml` +- Github Environment: `pypi` for production releases (to PyPI), `testpypi` for testing releases (to Test PyPI) + +Check [Publishing to PyPI with a Trusted Publisher](https://docs.pypi.org/trusted-publishers/) for more details on OIDC authentication. \ No newline at end of file diff --git a/docs/setup-development-environment.md b/docs/setup-development-environment.md new file mode 100644 index 0000000..9aa6ed6 --- /dev/null +++ b/docs/setup-development-environment.md @@ -0,0 +1,20 @@ +# Setup Development Environment + +1. [Install and activate mise](https://mise.jdx.dev/installing-mise.html) + +2. Install Docker Desktop (or Docker Engine on Linux) + +3. Configure github CLI with `gh auth login` and ensure you have access to the repository (optional, for convenience). + +4. 
Activate the virtual environment: + ```bash + # - Install all the tools defined in mise.toml + # - Set up the .venv with the correct Python version + mise install + + # vscode and poetry should automatically detect and use the .venv created by mise + poetry install + + # To recreate the virtualenv from scratch: + poetry env remove --all + ``` \ No newline at end of file diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 0000000..917c310 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,19 @@ +# Usage + +## CLI + +TODO: add CLI usage examples here. + +TODO: add system requirements: +- python 3.14 +- rsync +- btrfs-tools (if using btrfs snapshots) + +### Get help: +```bash +nbkp --help +``` + +## Python API + +TODO: add Python API usage examples here. diff --git a/examples/basic_usage.py b/examples/basic_usage.py deleted file mode 100644 index e4fcafd..0000000 --- a/examples/basic_usage.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python3 -""" -Basic usage example for SSB (Simple Safe Backup). -""" - -import tempfile -from pathlib import Path -from ssb import BackupManager, EncryptionManager - - -def main(): - """Demonstrate basic backup functionality.""" - print("SSB (Simple Safe Backup) - Basic Usage Example") - print("=" * 50) - - # Create temporary directories for demonstration - with tempfile.TemporaryDirectory() as temp_dir: - temp_path = Path(temp_dir) - - # Create some test files - source_file = temp_path / "important_document.txt" - source_file.write_text( - "This is a very important document that needs backup!" - ) - - source_dir = temp_path / "project_files" - source_dir.mkdir() - (source_dir / "main.py").write_text("print('Hello, World!')") - (source_dir / "config.json").write_text('{"version": "1.0.0"}') - (source_dir / "README.md").write_text( - "# My Project\n\nThis is my project." 
- ) - - # Create backup directory - backup_dir = temp_path / "backups" - backup_dir.mkdir() - - print(f"Created test files in: {temp_path}") - print(f"Backup directory: {backup_dir}") - print() - - # 1. Simple backup - print("1. Creating simple backup...") - backup_manager = BackupManager(str(backup_dir)) - - # Backup a file - file_backup_path = backup_manager.create_backup(str(source_file)) - print(f" File backup created: {file_backup_path}") - - # Backup a directory - dir_backup_path = backup_manager.create_backup(str(source_dir)) - print(f" Directory backup created: {dir_backup_path}") - - # List backups - backups = backup_manager.list_backups() - print(f" Available backups: {backups}") - print() - - # 2. Encrypted backup - print("2. Creating encrypted backup...") - encryption_manager = EncryptionManager.from_password( - "my_secure_password" - ) - secure_backup_manager = BackupManager( - str(backup_dir), encryption_manager - ) - - # Create encrypted backup with custom name - secure_backup_path = secure_backup_manager.create_backup( - str(source_file), "encrypted_document" - ) - print(f" Encrypted backup created: {secure_backup_path}") - print() - - # 3. Restore example - print("3. 
Restoring backup...") - restore_dir = temp_path / "restored" - restore_dir.mkdir() - - restored_path = backup_manager.restore_backup( - "important_document.txt", - str(restore_dir / "restored_document.txt"), - ) - print(f" Backup restored to: {restored_path}") - - # Verify restoration - restored_content = Path(restored_path).read_text() - print(f" Restored content: {restored_content}") - print() - - print("Example completed successfully!") - print("All files and directories were created in temporary locations.") - - -if __name__ == "__main__": - main() diff --git a/mise.lock b/mise.lock new file mode 100644 index 0000000..b60214a --- /dev/null +++ b/mise.lock @@ -0,0 +1,17 @@ +[[tools.github-cli]] +version = "2.87.2" +backend = "aqua:cli/cli" +"platforms.macos-arm64" = { checksum = "sha256:18d90c7ae5b462d47cc665d4c59de61af1e208edb81a9abee1b5a82c94e91d94", url = "https://github.com/cli/cli/releases/download/v2.87.2/gh_2.87.2_macOS_arm64.zip"} + +[[tools.poetry]] +version = "2.3.2" +backend = "vfox:mise-plugins/vfox-poetry" + +[[tools.python]] +version = "3.14.3" +backend = "core:python" +"platforms.linux-arm64" = { checksum = "sha256:e9b0aef9baafb6b9e7a4d47b82d6d9778eeafb2c95d23fb5247d3a5f8e52c5a5", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260211/cpython-3.14.3+20260211-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"} +"platforms.linux-x64" = { checksum = "sha256:759457004082459a402f369225b82565d88ca8257d9fd11c642a1c76ab0cb1cc", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260211/cpython-3.14.3+20260211-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"} +"platforms.macos-arm64" = { checksum = "sha256:348647e4c13b662f7b0d218ccf472688038679815fb1a429ca664b7dce324237", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260211/cpython-3.14.3+20260211-aarch64-apple-darwin-install_only_stripped.tar.gz"} +"platforms.macos-x64" = { checksum = 
"sha256:107c71b272b5eeecd7b7b607c4fac0796b0f221bc3391e6155a349789cc7eb17", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260211/cpython-3.14.3+20260211-x86_64-apple-darwin-install_only_stripped.tar.gz"} +"platforms.windows-x64" = { checksum = "sha256:dc5feea0e16807e7c7b2d20af3f2c18c7153f9cbd4b54063172553fda60c5a1f", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260211/cpython-3.14.3+20260211-x86_64-pc-windows-msvc-install_only_stripped.tar.gz"} diff --git a/mise.toml b/mise.toml new file mode 100644 index 0000000..009ac0c --- /dev/null +++ b/mise.toml @@ -0,0 +1,84 @@ +[tools] +python = "3.14.3" +poetry = "latest" +github-cli = "latest" + +[env] +_.python.venv = { path = ".venv", create = true } + +[settings] +lockfile = true + +[tasks.install] +description = "Install dependencies" +run = "poetry install" + +[tasks.test] +description = "Run unit tests (no Docker)" +run = "poetry run pytest tests/ --ignore=tests/integration/ -v" + +[tasks.test-integration] +description = "Run integration tests (requires Docker)" +run = "poetry run pytest tests/integration/ -v" + +[tasks.test-all] +description = "Run all tests" +run = "poetry run pytest tests/ -v" + +[tasks.format] +description = "Format code with black" +run = "poetry run black ." + +[tasks.lint] +description = "Run flake8 linting" +run = "poetry run flake8 nbkp/ tests/" + +[tasks.type-check] +description = "Run mypy type checking" +run = "poetry run mypy nbkp/" + +[tasks.check] +description = "Run all checks (format, lint, type-check, test)" +depends = ["format", "lint", "type-check", "test"] + +[tasks.clean] +description = "Clean up build artifacts" +run = """ +rm -rf build/ dist/ *.egg-info/ +find . -type d -name __pycache__ -delete +find . 
-type f -name "*.pyc" -delete +""" + +[tasks.build] +description = "Build the package" +run = "poetry build" + +[tasks.publish] +description = "Publish to PyPI (requires authentication)" +run = "poetry publish" + +[tasks.example] +description = "Run the basic usage example" +run = "poetry run python examples/basic_usage.py" + +[tasks.shell] +description = "Start a Poetry shell" +run = "poetry shell" + + +# Tasks used by CI workflows +[tasks.ci-test] +description = "Run all checks needed on CI" +run = [ + {task = "install"}, + {task = "build"}, + {task = "test-all"}, +] + +[tasks.ci-build] +description = "Run all checks needed on CI" +run = [ + {task = "install"}, + {task = "build"}, +] + diff --git a/nbkp/__init__.py b/nbkp/__init__.py new file mode 100644 index 0000000..6104386 --- /dev/null +++ b/nbkp/__init__.py @@ -0,0 +1,3 @@ +"""Nomad Backup (NBKP) - An rsync-based backup tool.""" + +__version__ = "0.1.0" diff --git a/nbkp/check.py b/nbkp/check.py new file mode 100644 index 0000000..ef8bfcb --- /dev/null +++ b/nbkp/check.py @@ -0,0 +1,489 @@ +"""Runtime status types for volumes and syncs, and activity checks.""" + +from __future__ import annotations + +import enum +import shutil +import subprocess +from pathlib import Path +from typing import Callable + +from pydantic import BaseModel, computed_field + +from .config import ( + Config, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SyncConfig, + Volume, +) +from .remote import run_remote_command + + +class VolumeReason(str, enum.Enum): + SENTINEL_NOT_FOUND = ".nbkp-vol volume sentinel not found" + UNREACHABLE = "unreachable" + + +class SyncReason(str, enum.Enum): + DISABLED = "disabled" + SOURCE_UNAVAILABLE = "source unavailable" + DESTINATION_UNAVAILABLE = "destination unavailable" + SOURCE_SENTINEL_NOT_FOUND = ".nbkp-src source sentinel not found" + DESTINATION_SENTINEL_NOT_FOUND = ".nbkp-dst destination sentinel not found" + SOURCE_LATEST_NOT_FOUND = "source latest/ directory not found" + 
SOURCE_SNAPSHOTS_DIR_NOT_FOUND = "source snapshots/ directory not found" + RSYNC_NOT_FOUND_ON_SOURCE = "rsync not found on source" + RSYNC_NOT_FOUND_ON_DESTINATION = "rsync not found on destination" + BTRFS_NOT_FOUND_ON_DESTINATION = "btrfs not found on destination" + STAT_NOT_FOUND_ON_DESTINATION = "stat not found on destination" + FINDMNT_NOT_FOUND_ON_DESTINATION = "findmnt not found on destination" + DESTINATION_NOT_BTRFS = "destination not on btrfs filesystem" + DESTINATION_NOT_BTRFS_SUBVOLUME = ( + "destination endpoint is not a btrfs subvolume" + ) + DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM = ( + "destination not mounted with user_subvol_rm_allowed" + ) + DESTINATION_LATEST_NOT_FOUND = "destination latest/ directory not found" + DESTINATION_SNAPSHOTS_DIR_NOT_FOUND = ( + "destination snapshots/ directory not found" + ) + DESTINATION_NO_HARDLINK_SUPPORT = ( + "destination filesystem does not support hard links" + ) + + +class VolumeStatus(BaseModel): + """Runtime status of a volume.""" + + slug: str + config: Volume + reasons: list[VolumeReason] + + @computed_field # type: ignore[prop-decorator] + @property + def active(self) -> bool: + return len(self.reasons) == 0 + + +class SyncStatus(BaseModel): + """Runtime status of a sync.""" + + slug: str + config: SyncConfig + source_status: VolumeStatus + destination_status: VolumeStatus + reasons: list[SyncReason] + + @computed_field # type: ignore[prop-decorator] + @property + def active(self) -> bool: + return len(self.reasons) == 0 + + +def check_volume( + volume: Volume, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> VolumeStatus: + """Check if a volume is active.""" + re = resolved_endpoints or {} + match volume: + case LocalVolume(): + return _check_local_volume(volume) + case RemoteVolume(): + return _check_remote_volume(volume, re) + + +def _check_local_volume(volume: LocalVolume) -> VolumeStatus: + """Check if a local volume is active (.nbkp-vol sentinel exists).""" + sentinel = Path(volume.path) 
/ ".nbkp-vol" + reasons: list[VolumeReason] = ( + [] if sentinel.exists() else [VolumeReason.SENTINEL_NOT_FOUND] + ) + return VolumeStatus( + slug=volume.slug, + config=volume, + reasons=reasons, + ) + + +def _check_remote_volume( + volume: RemoteVolume, + resolved_endpoints: ResolvedEndpoints, +) -> VolumeStatus: + """Check if a remote volume is active (SSH + .nbkp-vol sentinel).""" + ep = resolved_endpoints[volume.slug] + sentinel_path = f"{volume.path}/.nbkp-vol" + result = run_remote_command( + ep.server, ["test", "-f", sentinel_path], ep.proxy_chain + ) + reasons: list[VolumeReason] = ( + [] if result.returncode == 0 else [VolumeReason.UNREACHABLE] + ) + return VolumeStatus( + slug=volume.slug, + config=volume, + reasons=reasons, + ) + + +def _check_endpoint_sentinel( + volume: Volume, + subdir: str | None, + sentinel_name: str, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if an endpoint sentinel file exists.""" + if subdir: + rel_path = f"{volume.path}/{subdir}/{sentinel_name}" + else: + rel_path = f"{volume.path}/{sentinel_name}" + + match volume: + case LocalVolume(): + return Path(rel_path).exists() + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command( + ep.server, ["test", "-f", rel_path], ep.proxy_chain + ) + return result.returncode == 0 + + +def _check_command_available( + volume: Volume, + command: str, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if a command is available on the volume's host.""" + match volume: + case LocalVolume(): + return shutil.which(command) is not None + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command( + ep.server, ["which", command], ep.proxy_chain + ) + return result.returncode == 0 + + +def _check_btrfs_filesystem( + volume: Volume, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if the volume path is on a btrfs filesystem.""" + cmd = ["stat", "-f", "-c", "%T", volume.path] + match volume: + 
case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + return result.returncode == 0 and result.stdout.strip() == "btrfs" + + +_NO_HARDLINK_FILESYSTEMS = {"vfat", "msdos", "exfat"} + + +def _check_hardlink_support( + volume: Volume, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if the volume filesystem supports hard links. + + Rejects known non-hardlink filesystems (FAT, exFAT). + """ + cmd = ["stat", "-f", "-c", "%T", volume.path] + match volume: + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + if result.returncode != 0: + return True # Cannot determine; assume supported + fs_type = result.stdout.strip() + return fs_type not in _NO_HARDLINK_FILESYSTEMS + + +def _resolve_endpoint(volume: Volume, subdir: str | None) -> str: + """Resolve the full endpoint path for a volume.""" + if subdir: + return f"{volume.path}/{subdir}" + else: + return volume.path + + +def _check_directory_exists( + volume: Volume, + path: str, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if a directory exists on the volume's host.""" + match volume: + case LocalVolume(): + return Path(path).is_dir() + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command( + ep.server, ["test", "-d", path], ep.proxy_chain + ) + return result.returncode == 0 + + +def _check_btrfs_subvolume( + volume: Volume, + subdir: str | None, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if the endpoint path is a btrfs subvolume. + + On btrfs, subvolumes always have inode number 256. 
+ """ + path = _resolve_endpoint(volume, subdir) + cmd = ["stat", "-c", "%i", path] + match volume: + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + return result.returncode == 0 and result.stdout.strip() == "256" + + +def _check_btrfs_mount_option( + volume: Volume, + option: str, + resolved_endpoints: ResolvedEndpoints, +) -> bool: + """Check if the volume is mounted with a specific mount option.""" + cmd = ["findmnt", "-T", volume.path, "-n", "-o", "OPTIONS"] + match volume: + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + if result.returncode != 0: + return False + options = result.stdout.strip().split(",") + return option in options + + +def _check_btrfs_dest( + dst_vol: Volume, + sync: SyncConfig, + has_findmnt: bool, + reasons: list[SyncReason], + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Run btrfs filesystem, subvolume, and directory checks.""" + if not _check_btrfs_filesystem(dst_vol, resolved_endpoints): + reasons.append(SyncReason.DESTINATION_NOT_BTRFS) + elif not _check_btrfs_subvolume( + dst_vol, + sync.destination.subdir, + resolved_endpoints, + ): + reasons.append(SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME) + else: + if has_findmnt and not _check_btrfs_mount_option( + dst_vol, + "user_subvol_rm_allowed", + resolved_endpoints, + ): + reasons.append(SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM) + ep = _resolve_endpoint(dst_vol, sync.destination.subdir) + if not _check_directory_exists( + dst_vol, f"{ep}/latest", resolved_endpoints + ): + reasons.append(SyncReason.DESTINATION_LATEST_NOT_FOUND) + if not _check_directory_exists( + dst_vol, f"{ep}/snapshots", resolved_endpoints + ): + 
reasons.append(SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND) + + +def _check_hard_link_dest( + dst_vol: Volume, + sync: SyncConfig, + reasons: list[SyncReason], + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Run hard-link snapshot filesystem and directory checks.""" + if not _check_hardlink_support(dst_vol, resolved_endpoints): + reasons.append(SyncReason.DESTINATION_NO_HARDLINK_SUPPORT) + ep = _resolve_endpoint(dst_vol, sync.destination.subdir) + if not _check_directory_exists( + dst_vol, f"{ep}/snapshots", resolved_endpoints + ): + reasons.append(SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND) + + +def check_sync( + sync: SyncConfig, + config: Config, + volume_statuses: dict[str, VolumeStatus], + resolved_endpoints: ResolvedEndpoints | None = None, +) -> SyncStatus: + """Check if a sync is active, accumulating all failure reasons.""" + re = resolved_endpoints or {} + src_vol_name = sync.source.volume + dst_vol_name = sync.destination.volume + + src_status = volume_statuses[src_vol_name] + dst_status = volume_statuses[dst_vol_name] + + if not sync.enabled: + return SyncStatus( + slug=sync.slug, + config=sync, + source_status=src_status, + destination_status=dst_status, + reasons=[SyncReason.DISABLED], + ) + else: + reasons: list[SyncReason] = [] + + src_vol = config.volumes[src_vol_name] + dst_vol = config.volumes[dst_vol_name] + + # Volume availability + if not src_status.active: + reasons.append(SyncReason.SOURCE_UNAVAILABLE) + + if not dst_status.active: + reasons.append(SyncReason.DESTINATION_UNAVAILABLE) + + # Source checks (only if source volume is active) + if src_status.active: + if not _check_endpoint_sentinel( + src_vol, + sync.source.subdir, + ".nbkp-src", + re, + ): + reasons.append(SyncReason.SOURCE_SENTINEL_NOT_FOUND) + if not _check_command_available(src_vol, "rsync", re): + reasons.append(SyncReason.RSYNC_NOT_FOUND_ON_SOURCE) + if sync.source.snapshot_mode != "none": + src_ep = _resolve_endpoint(src_vol, sync.source.subdir) + if not 
_check_directory_exists( + src_vol, f"{src_ep}/latest", re + ): + reasons.append(SyncReason.SOURCE_LATEST_NOT_FOUND) + if not _check_directory_exists( + src_vol, f"{src_ep}/snapshots", re + ): + reasons.append(SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND) + + # Destination checks (only if destination volume is active) + if dst_status.active: + if not _check_endpoint_sentinel( + dst_vol, + sync.destination.subdir, + ".nbkp-dst", + re, + ): + reasons.append(SyncReason.DESTINATION_SENTINEL_NOT_FOUND) + if not _check_command_available(dst_vol, "rsync", re): + reasons.append(SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION) + if sync.destination.btrfs_snapshots.enabled: + if not _check_command_available(dst_vol, "btrfs", re): + reasons.append(SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION) + else: + has_stat = _check_command_available(dst_vol, "stat", re) + has_findmnt = _check_command_available( + dst_vol, "findmnt", re + ) + + if not has_stat: + reasons.append( + SyncReason.STAT_NOT_FOUND_ON_DESTINATION + ) + if not has_findmnt: + reasons.append( + SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION + ) + + if has_stat: + _check_btrfs_dest( + dst_vol, + sync, + has_findmnt, + reasons, + re, + ) + elif sync.destination.hard_link_snapshots.enabled: + has_stat = _check_command_available(dst_vol, "stat", re) + if not has_stat: + reasons.append(SyncReason.STAT_NOT_FOUND_ON_DESTINATION) + else: + _check_hard_link_dest(dst_vol, sync, reasons, re) + + return SyncStatus( + slug=sync.slug, + config=sync, + source_status=src_status, + destination_status=dst_status, + reasons=reasons, + ) + + +def check_all_syncs( + config: Config, + on_progress: Callable[[str], None] | None = None, + only_syncs: list[str] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + """Check volumes and syncs, caching volume checks. + + When *only_syncs* is given, only those syncs (and the + volumes they reference) are checked. 
+ """ + re = resolved_endpoints or {} + syncs = ( + {s: sc for s, sc in config.syncs.items() if s in only_syncs} + if only_syncs + else config.syncs + ) + + needed_volumes: set[str] = ( + {sc.source.volume for sc in syncs.values()} + | {sc.destination.volume for sc in syncs.values()} + if only_syncs + else set(config.volumes.keys()) + ) + + volume_statuses: dict[str, VolumeStatus] = {} + for slug in needed_volumes: + volume = config.volumes[slug] + volume_statuses[slug] = check_volume(volume, re) + if on_progress: + on_progress(slug) + + sync_statuses: dict[str, SyncStatus] = {} + for slug, sync in syncs.items(): + sync_statuses[slug] = check_sync(sync, config, volume_statuses, re) + if on_progress: + on_progress(slug) + + return volume_statuses, sync_statuses diff --git a/nbkp/cli.py b/nbkp/cli.py new file mode 100644 index 0000000..095ee0f --- /dev/null +++ b/nbkp/cli.py @@ -0,0 +1,704 @@ +"""Typer CLI: run and status commands.""" + +from __future__ import annotations + +import json +import os +import stat +from pathlib import Path +from typing import Annotated, Literal, Optional + +import typer +from rich.console import Console +from rich.progress import Progress, SpinnerColumn, TextColumn + +from .config import ( + Config, + ConfigError, + EndpointFilter, + ResolvedEndpoints, + load_config, + resolve_all_endpoints, +) +from .check import ( + SyncReason, + SyncStatus, + VolumeStatus, + check_all_syncs, +) +from .sync.btrfs import ( + list_snapshots, + prune_snapshots as btrfs_prune_snapshots, +) +from .sync.hardlinks import ( + prune_snapshots as hl_prune_snapshots, +) +from .output import ( + OutputFormat, + print_config_error, + print_human_config, + print_human_prune_results, + print_human_results, + print_human_check, + print_human_troubleshoot, +) +from .scriptgen import ScriptOptions, generate_script +from .sync import ( + ProgressMode, + PruneResult, + SyncResult, + run_all_syncs, +) + +_SENTINEL_ONLY_REASONS = { + SyncReason.SOURCE_SENTINEL_NOT_FOUND, + 
SyncReason.DESTINATION_SENTINEL_NOT_FOUND, +} + +app = typer.Typer( + name="nbkp", + help="Nomad Backup", + no_args_is_help=True, +) + +config_app = typer.Typer( + name="config", + help="Configuration commands", + no_args_is_help=True, +) +app.add_typer(config_app) + + +@app.command() +def check( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config file"), + ] = None, + output: Annotated[ + OutputFormat, + typer.Option("--output", "-o", help="Output format"), + ] = OutputFormat.HUMAN, + strict: Annotated[ + bool, + typer.Option( + "--strict/--no-strict", + help=( + "Exit non-zero on any inactive sync," + " including missing sentinels" + ), + ), + ] = False, + locations: Annotated[ + Optional[list[str]], + typer.Option( + "--locations", + "-l", + help="Prefer endpoints at these locations", + ), + ] = None, + private: Annotated[ + bool, + typer.Option( + "--private", + help="Prefer private (LAN) endpoints", + ), + ] = False, + public: Annotated[ + bool, + typer.Option( + "--public", + help="Prefer public (WAN) endpoints", + ), + ] = False, +) -> None: + """Check status of volumes and syncs.""" + cfg = _load_config_or_exit(config) + resolved = _resolve_endpoints(cfg, locations, private, public) + output_format = output + vol_statuses, sync_statuses, has_errors = _check_and_display( + cfg, + output_format, + strict, + resolved_endpoints=resolved, + ) + + if output_format is OutputFormat.JSON: + data = { + "volumes": [v.model_dump() for v in vol_statuses.values()], + "syncs": [s.model_dump() for s in sync_statuses.values()], + } + typer.echo(json.dumps(data, indent=2)) + + if has_errors: + raise typer.Exit(1) + + +@config_app.command() +def show( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config file"), + ] = None, + output: Annotated[ + OutputFormat, + typer.Option("--output", "-o", help="Output format"), + ] = OutputFormat.HUMAN, +) -> None: + """Show parsed configuration.""" + cfg = 
_load_config_or_exit(config) + output_format = output + match output_format: + case OutputFormat.JSON: + typer.echo(json.dumps(cfg.model_dump(by_alias=True), indent=2)) + case OutputFormat.HUMAN: + resolved = resolve_all_endpoints(cfg) + print_human_config(cfg, resolved_endpoints=resolved) + + +@app.command() +def run( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config file"), + ] = None, + dry_run: Annotated[ + bool, + typer.Option("--dry-run", "-n", help="Perform a dry run"), + ] = False, + sync: Annotated[ + Optional[list[str]], + typer.Option("--sync", "-s", help="Sync name(s) to run"), + ] = None, + output: Annotated[ + OutputFormat, + typer.Option("--output", "-o", help="Output format"), + ] = OutputFormat.HUMAN, + progress: Annotated[ + Optional[ProgressMode], + typer.Option( + "--progress", + "-p", + help=("Progress mode: none, overall," " per-file, or full"), + ), + ] = None, + prune: Annotated[ + bool, + typer.Option( + "--prune/--no-prune", + help="Prune old snapshots after sync", + ), + ] = True, + strict: Annotated[ + bool, + typer.Option( + "--strict/--no-strict", + help=( + "Exit non-zero on any inactive sync," + " including missing sentinels" + ), + ), + ] = False, + locations: Annotated[ + Optional[list[str]], + typer.Option( + "--locations", + "-l", + help="Prefer endpoints at these locations", + ), + ] = None, + private: Annotated[ + bool, + typer.Option( + "--private", + help="Prefer private (LAN) endpoints", + ), + ] = False, + public: Annotated[ + bool, + typer.Option( + "--public", + help="Prefer public (WAN) endpoints", + ), + ] = False, +) -> None: + """Run backup syncs.""" + cfg = _load_config_or_exit(config) + resolved = _resolve_endpoints(cfg, locations, private, public) + output_format = output + vol_statuses, sync_statuses, has_errors = _check_and_display( + cfg, + output_format, + strict, + only_syncs=sync, + resolved_endpoints=resolved, + ) + + if has_errors: + if output_format is 
OutputFormat.JSON: + data = { + "volumes": [v.model_dump() for v in vol_statuses.values()], + "syncs": [s.model_dump() for s in sync_statuses.values()], + "results": [], + } + typer.echo(json.dumps(data, indent=2)) + raise typer.Exit(1) + else: + if output_format is OutputFormat.HUMAN: + typer.echo("") + + use_spinner = output_format is OutputFormat.HUMAN and progress in ( + None, + ProgressMode.NONE, + ) + stream_output = ( + (lambda chunk: typer.echo(chunk, nl=False)) + if output_format is OutputFormat.HUMAN and not use_spinner + else None + ) + + console = Console() + status_display = None + + def on_sync_start(slug: str) -> None: + nonlocal status_display + if use_spinner: + status_display = console.status(f"Syncing {slug}...") + status_display.start() + else: + console.print(f"Syncing {slug}...") + + def on_sync_end(slug: str, result: SyncResult) -> None: + nonlocal status_display + if status_display is not None: + status_display.stop() + status_display = None + icon = "[green]✓[/green]" if result.success else "[red]✗[/red]" + console.print(f"{icon} {slug}") + + results = run_all_syncs( + cfg, + sync_statuses, + dry_run=dry_run, + only_syncs=sync, + progress=progress, + prune=prune, + on_rsync_output=stream_output, + on_sync_start=( + on_sync_start if output_format is OutputFormat.HUMAN else None + ), + on_sync_end=( + on_sync_end if output_format is OutputFormat.HUMAN else None + ), + resolved_endpoints=resolved, + ) + + match output_format: + case OutputFormat.JSON: + data = { + "volumes": [v.model_dump() for v in vol_statuses.values()], + "syncs": [s.model_dump() for s in sync_statuses.values()], + "results": [r.model_dump() for r in results], + } + typer.echo(json.dumps(data, indent=2)) + case OutputFormat.HUMAN: + typer.echo("") + print_human_results(results, dry_run) + + if any(not r.success for r in results): + raise typer.Exit(1) + + +@app.command() +def sh( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config 
file"), + ] = None, + output_file: Annotated[ + Optional[str], + typer.Option( + "--output-file", + "-o", + help="Write script to file (made executable)", + ), + ] = None, + relative_src: Annotated[ + bool, + typer.Option( + "--relative-src", + help=( + "Make source paths relative to" + " script location" + " (requires --output-file)" + ), + ), + ] = False, + relative_dst: Annotated[ + bool, + typer.Option( + "--relative-dst", + help=( + "Make destination paths relative to" + " script location" + " (requires --output-file)" + ), + ), + ] = False, + locations: Annotated[ + Optional[list[str]], + typer.Option( + "--locations", + "-l", + help="Prefer endpoints at these locations", + ), + ] = None, + private: Annotated[ + bool, + typer.Option( + "--private", + help="Prefer private (LAN) endpoints", + ), + ] = False, + public: Annotated[ + bool, + typer.Option( + "--public", + help="Prefer public (WAN) endpoints", + ), + ] = False, +) -> None: + """Generate a standalone backup shell script. + + This is useful for deploying to systems without Python, + or auditing what commands will run. 
+ """ + if (relative_src or relative_dst) and output_file is None: + typer.echo( + "Error: --relative-src/--relative-dst" " require --output-file", + err=True, + ) + raise typer.Exit(2) + + cfg = _load_config_or_exit(config) + resolved = _resolve_endpoints(cfg, locations, private, public) + script = generate_script( + cfg, + ScriptOptions( + config_path=config, + output_file=( + os.path.abspath(output_file) if output_file else None + ), + relative_src=relative_src, + relative_dst=relative_dst, + ), + resolved_endpoints=resolved, + ) + if output_file is not None: + path = Path(output_file) + path.write_text(script, encoding="utf-8") + path.chmod(path.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP) + typer.echo(f"Written to {output_file}", err=True) + else: + typer.echo(script) + + +@app.command() +def troubleshoot( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config file"), + ] = None, + locations: Annotated[ + Optional[list[str]], + typer.Option( + "--locations", + "-l", + help="Prefer endpoints at these locations", + ), + ] = None, + private: Annotated[ + bool, + typer.Option( + "--private", + help="Prefer private (LAN) endpoints", + ), + ] = False, + public: Annotated[ + bool, + typer.Option( + "--public", + help="Prefer public (WAN) endpoints", + ), + ] = False, +) -> None: + """Diagnose issues and show how to fix them.""" + cfg = _load_config_or_exit(config) + resolved = _resolve_endpoints(cfg, locations, private, public) + vol_statuses, sync_statuses = _check_all_with_progress( + cfg, + use_progress=True, + resolved_endpoints=resolved, + ) + print_human_troubleshoot( + vol_statuses, + sync_statuses, + cfg, + resolved_endpoints=resolved, + ) + + +@app.command() +def prune( + config: Annotated[ + Optional[str], + typer.Option("--config", "-c", help="Path to config file"), + ] = None, + sync: Annotated[ + Optional[list[str]], + typer.Option("--sync", "-s", help="Sync name(s) to prune"), + ] = None, + dry_run: Annotated[ + 
bool, + typer.Option("--dry-run", "-n", help="Perform a dry run"), + ] = False, + output: Annotated[ + OutputFormat, + typer.Option("--output", "-o", help="Output format"), + ] = OutputFormat.HUMAN, + locations: Annotated[ + Optional[list[str]], + typer.Option( + "--locations", + "-l", + help="Prefer endpoints at these locations", + ), + ] = None, + private: Annotated[ + bool, + typer.Option( + "--private", + help="Prefer private (LAN) endpoints", + ), + ] = False, + public: Annotated[ + bool, + typer.Option( + "--public", + help="Prefer public (WAN) endpoints", + ), + ] = False, +) -> None: + """Prune old snapshots beyond max-snapshots limit.""" + cfg = _load_config_or_exit(config) + resolved = _resolve_endpoints(cfg, locations, private, public) + output_format = output + _, sync_statuses = _check_all_with_progress( + cfg, + use_progress=output_format is OutputFormat.HUMAN, + resolved_endpoints=resolved, + ) + + def _is_prunable(slug: str, status: SyncStatus) -> bool: + if sync and slug not in sync: + return False + if not status.active: + return False + dst = status.config.destination + match dst.snapshot_mode: + case "btrfs": + return dst.btrfs_snapshots.max_snapshots is not None + case "hard-link": + return dst.hard_link_snapshots.max_snapshots is not None + case _: + return False + + prunable = [ + (slug, status) + for slug, status in sync_statuses.items() + if _is_prunable(slug, status) + ] + + results: list[PruneResult] = [] + for slug, status in prunable: + dst = status.config.destination + try: + match dst.snapshot_mode: + case "btrfs": + assert dst.btrfs_snapshots.max_snapshots is not None + deleted = btrfs_prune_snapshots( + status.config, + cfg, + dst.btrfs_snapshots.max_snapshots, + dry_run=dry_run, + resolved_endpoints=resolved, + ) + case "hard-link": + assert dst.hard_link_snapshots.max_snapshots is not None + deleted = hl_prune_snapshots( + status.config, + cfg, + dst.hard_link_snapshots.max_snapshots, + dry_run=dry_run, + 
resolved_endpoints=resolved, + ) + case _: + deleted = [] + remaining = list_snapshots(status.config, cfg, resolved) + results.append( + PruneResult( + sync_slug=slug, + deleted=deleted, + kept=(len(remaining) + (len(deleted) if dry_run else 0)), + dry_run=dry_run, + ) + ) + except RuntimeError as e: + results.append( + PruneResult( + sync_slug=slug, + deleted=[], + kept=0, + dry_run=dry_run, + error=str(e), + ) + ) + + match output_format: + case OutputFormat.JSON: + typer.echo( + json.dumps( + [r.model_dump() for r in results], + indent=2, + ) + ) + case OutputFormat.HUMAN: + print_human_prune_results(results, dry_run) + + if any(r.error for r in results): + raise typer.Exit(1) + + +def _load_config_or_exit( + config_path: str | None, +) -> Config: + """Load config or exit with code 2 on error.""" + try: + return load_config(config_path) + except ConfigError as e: + print_config_error(e) + raise typer.Exit(2) + + +def _build_endpoint_filter( + locations: list[str] | None, + private: bool, + public: bool, +) -> EndpointFilter | None: + """Build an EndpointFilter from CLI options.""" + network: Literal["private", "public"] | None = None + if private: + network = "private" + elif public: + network = "public" + locs = locations or [] + if not locs and network is None: + return None + return EndpointFilter(locations=locs, network=network) + + +def _resolve_endpoints( + cfg: Config, + locations: list[str] | None, + private: bool, + public: bool, +) -> ResolvedEndpoints: + """Build filter and resolve all endpoints once.""" + ef = _build_endpoint_filter(locations, private, public) + return resolve_all_endpoints(cfg, ef) + + +def _check_all_with_progress( + cfg: Config, + use_progress: bool, + only_syncs: list[str] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + """Run check_all_syncs with an optional progress bar.""" + total = len(cfg.volumes) + len(cfg.syncs) + if not use_progress or 
total == 0: + return check_all_syncs( + cfg, + only_syncs=only_syncs, + resolved_endpoints=resolved_endpoints, + ) + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + TextColumn("{task.completed}/{task.total}"), + transient=True, + ) as progress: + task = progress.add_task("Checking volumes and syncs...", total=total) + + def on_progress(_slug: str) -> None: + progress.advance(task) + + return check_all_syncs( + cfg, + on_progress=on_progress, + only_syncs=only_syncs, + resolved_endpoints=resolved_endpoints, + ) + + +def _check_and_display( + cfg: Config, + output_format: OutputFormat, + strict: bool, + only_syncs: list[str] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> tuple[ + dict[str, VolumeStatus], + dict[str, SyncStatus], + bool, +]: + """Compute statuses, display human output, and check for errors. + + Returns volume statuses, sync statuses, and whether there are + fatal errors. When *only_syncs* is given, only those syncs + (and the volumes they reference) are checked. 
+ """ + vol_statuses, sync_statuses = _check_all_with_progress( + cfg, + use_progress=output_format is OutputFormat.HUMAN, + only_syncs=only_syncs, + resolved_endpoints=resolved_endpoints, + ) + + if output_format is OutputFormat.HUMAN: + print_human_check( + vol_statuses, + sync_statuses, + cfg, + resolved_endpoints=resolved_endpoints, + ) + + if strict: + has_errors = any(not s.active for s in sync_statuses.values()) + else: + has_errors = any( + set(s.reasons) - _SENTINEL_ONLY_REASONS + for s in sync_statuses.values() + ) + + return vol_statuses, sync_statuses, has_errors + + +def main() -> None: + """Main CLI entry point.""" + app() + + +if __name__ == "__main__": + main() diff --git a/nbkp/config/__init__.py b/nbkp/config/__init__.py new file mode 100644 index 0000000..3521678 --- /dev/null +++ b/nbkp/config/__init__.py @@ -0,0 +1,47 @@ +"""Configuration types and loading.""" + +from .loader import ConfigError, find_config_file, load_config +from .protocol import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + EndpointFilter, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + RsyncOptions, + SshEndpoint, + Slug, + SshConnectionOptions, + SyncConfig, + SyncEndpoint, + Volume, +) +from .resolution import ( + ResolvedEndpoint, + ResolvedEndpoints, + resolve_all_endpoints, +) + +__all__ = [ + "BtrfsSnapshotConfig", + "Config", + "ConfigError", + "DestinationSyncEndpoint", + "EndpointFilter", + "HardLinkSnapshotConfig", + "LocalVolume", + "RemoteVolume", + "RsyncOptions", + "ResolvedEndpoint", + "ResolvedEndpoints", + "SshEndpoint", + "Slug", + "SshConnectionOptions", + "SyncConfig", + "SyncEndpoint", + "Volume", + "find_config_file", + "load_config", + "resolve_all_endpoints", +] diff --git a/nbkp/config/loader.py b/nbkp/config/loader.py new file mode 100644 index 0000000..9108e9f --- /dev/null +++ b/nbkp/config/loader.py @@ -0,0 +1,62 @@ +"""YAML configuration loading, parsing, and validation.""" + +from __future__ import annotations + 
+import os +from pathlib import Path + +import yaml + +from .protocol import Config + + +class ConfigError(Exception): + """Raised when configuration is invalid.""" + + +def find_config_file(config_path: str | None = None) -> Path: + """Find the configuration file using search order. + + Order: explicit path > XDG_CONFIG_HOME > /etc/nbkp/ + """ + if config_path is not None: + p = Path(config_path) + if not p.is_file(): + raise ConfigError(f"Config file not found: {config_path}") + else: + return p + else: + xdg = os.environ.get( + "XDG_CONFIG_HOME", + os.path.expanduser("~/.config"), + ) + xdg_path = Path(xdg) / "nbkp" / "config.yaml" + etc_path = Path("/etc/nbkp/config.yaml") + if xdg_path.is_file(): + return xdg_path + elif etc_path.is_file(): + return etc_path + else: + raise ConfigError( + "No config file found. Searched: " + f"{xdg_path}, /etc/nbkp/config.yaml" + ) + + +def load_config(config_path: str | None = None) -> Config: + """Load and validate configuration from a YAML file.""" + path = find_config_file(config_path) + try: + with open(path) as f: + raw = yaml.safe_load(f) + except yaml.YAMLError as e: + raise ConfigError(f"Invalid YAML in {path}: {e}") from e + + if not isinstance(raw, dict): + raise ConfigError("Config file must be a YAML mapping") + else: + try: + config = Config.model_validate(raw) + except Exception as e: + raise ConfigError(str(e)) from e + return config diff --git a/nbkp/config/protocol.py b/nbkp/config/protocol.py new file mode 100644 index 0000000..967c9de --- /dev/null +++ b/nbkp/config/protocol.py @@ -0,0 +1,528 @@ +from __future__ import annotations + +from typing import Any, Annotated, Dict, List, Literal, Optional, Union + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + ValidationInfo, + field_validator, + model_validator, +) + + +def _to_kebab(name: str) -> str: + return name.replace("_", "-") + + +class _BaseModel(BaseModel): + model_config = ConfigDict( + alias_generator=_to_kebab, + populate_by_name=True, 
+ ) + + +Slug = Annotated[ + str, + Field( + min_length=1, + max_length=50, + pattern=r"^[a-z0-9]+(-[a-z0-9]+)*$", + ), +] + + +class LocalVolume(_BaseModel): + model_config = ConfigDict(frozen=True) + type: Literal["local"] = "local" + """A local filesystem volume.""" + slug: Slug + path: str = Field(..., min_length=1) + + +class SshConnectionOptions(_BaseModel): + """SSH connection options. + + These fields map to parameters across three layers: + - SSH client: ssh(1) -o options + - Paramiko: SSHClient.connect() kwargs + https://docs.paramiko.org/en/stable/api/client.html + - Fabric: Connection() constructor + https://docs.fabfile.org/en/stable/api/connection.html + """ + + model_config = ConfigDict(frozen=True) + + # Connection + # SSH: ConnectTimeout | Paramiko: timeout | Fabric: connect_timeout + connect_timeout: int = Field(default=10, ge=1) + # SSH: Compression | Paramiko: compress + compress: bool = False + # SSH: ServerAliveInterval | Paramiko: transport.set_keepalive() + server_alive_interval: Optional[int] = Field(default=None, ge=1) + + # Authentication + # Paramiko: allow_agent — use SSH agent for key lookup + allow_agent: bool = True + # Paramiko: look_for_keys — search ~/.ssh/ for keys + look_for_keys: bool = True + + # Timeouts + # Paramiko: banner_timeout — wait for SSH banner + banner_timeout: Optional[float] = Field(default=None, ge=0) + # Paramiko: auth_timeout — wait for auth response + auth_timeout: Optional[float] = Field(default=None, ge=0) + # Paramiko: channel_timeout — wait for channel open + channel_timeout: Optional[float] = Field(default=None, ge=0) + + # Host key verification + # SSH: StrictHostKeyChecking + # Paramiko: SSHClient.set_missing_host_key_policy() + strict_host_key_checking: bool = True + # SSH: UserKnownHostsFile + # Paramiko: SSHClient.load_host_keys() + known_hosts_file: Optional[str] = None + + # Forwarding + # SSH: ForwardAgent | Fabric: forward_agent + forward_agent: bool = False + + # Algorithm restrictions + # 
Paramiko: disabled_algorithms — disable specific algorithms
    # (Paramiko/Fabric only — no SSH CLI equivalent)
    disabled_algorithms: Optional[Dict[str, List[str]]] = None


class SshEndpoint(_BaseModel):
    """A named SSH server definition.

    Endpoints are stored under a slug key in the top-level config;
    the slug is copied into each entry by
    ``Config.inject_ssh_endpoint_slugs``. An endpoint may inherit
    fields from another endpoint via ``extends``, resolved before
    validation by ``Config.resolve_ssh_endpoint_extends``.
    """

    model_config = ConfigDict(frozen=True)
    slug: Slug
    host: str = Field(..., min_length=1)
    # Standard TCP port range; defaults to the usual SSH port 22.
    port: int = Field(default=22, ge=1, le=65535)
    user: Optional[str] = None
    # Path to a private key file, when key-based auth is wanted.
    key: Optional[str] = None
    connection_options: SshConnectionOptions = Field(
        default_factory=lambda: SshConnectionOptions()
    )
    # At most one of proxy_jump / proxy_jumps may be set
    # (enforced by validate_proxy_exclusivity below).
    proxy_jump: Optional[str] = None
    proxy_jumps: Optional[List[str]] = None
    # At most one of location / locations may be set
    # (enforced by validate_location_exclusivity below).
    location: Optional[str] = None
    locations: Optional[List[str]] = None
    # Slug of another endpoint to inherit fields from; consumed and
    # removed during extends resolution in Config.
    extends: Optional[str] = None

    @model_validator(mode="after")
    def validate_proxy_exclusivity(self) -> SshEndpoint:
        """Reject configs that set both proxy-jump and proxy-jumps."""
        if self.proxy_jump is not None and self.proxy_jumps is not None:
            raise ValueError(
                "proxy-jump and proxy-jumps are mutually exclusive"
            )
        return self

    @model_validator(mode="after")
    def validate_location_exclusivity(self) -> SshEndpoint:
        """Reject configs that set both location and locations."""
        if self.location is not None and self.locations is not None:
            raise ValueError("location and locations are mutually exclusive")
        return self

    @property
    def proxy_jump_chain(self) -> list[str]:
        """Return the proxy-jump chain as a list of slugs.

        Normalizes the two mutually-exclusive spellings: a copy of
        ``proxy_jumps`` when set, a single-element list from
        ``proxy_jump``, or an empty list when neither is set.
        """
        if self.proxy_jumps is not None:
            return list(self.proxy_jumps)
        elif self.proxy_jump is not None:
            return [self.proxy_jump]
        else:
            return []

    @property
    def location_list(self) -> list[str]:
        """Return locations as a list.

        Same normalization rule as ``proxy_jump_chain``, applied to
        the ``locations`` / ``location`` pair.
        """
        if self.locations is not None:
            return list(self.locations)
        elif self.location is not None:
            return [self.location]
        else:
            return []


class RemoteVolume(_BaseModel):
    model_config = ConfigDict(frozen=True)
    # Discriminator value for the Volume union below.
    type: Literal["remote"] = "remote"
    """A remote volume accessible via SSH."""
    slug: Slug
    # Primary SSH endpoint slug (required). ssh_endpoints may list
    # alternates considered during endpoint resolution.
    ssh_endpoint: str = Field(..., min_length=1)
    ssh_endpoints: Optional[List[str]] = None
    path: str = Field(..., min_length=1)


# Discriminated union keyed on the "type" field:
# "local" -> LocalVolume, "remote" -> RemoteVolume.
Volume = Annotated[
    Union[LocalVolume, RemoteVolume], Field(discriminator="type")
]


class BtrfsSnapshotConfig(_BaseModel):
    """Configuration for btrfs snapshot management."""

    model_config = ConfigDict(frozen=True)
    enabled: bool = False
    # When set, caps how many snapshots are retained.
    max_snapshots: Optional[int] = Field(default=None, ge=1)


class HardLinkSnapshotConfig(_BaseModel):
    """Configuration for hard-link-based snapshot management."""

    model_config = ConfigDict(frozen=True)
    enabled: bool = False
    # When set, caps how many snapshots are retained.
    max_snapshots: Optional[int] = Field(default=None, ge=1)


class SyncEndpoint(_BaseModel):
    """A sync endpoint referencing a volume by slug.

    When used as a destination, snapshot config controls how
    backups are stored (btrfs subvolume snapshots or hard-link
    copies). When used as a source, snapshot config tells rsync
    to read from the ``latest/`` directory instead of the volume
    root.
    """

    # NOTE(review): unlike the sibling models, this one is not
    # frozen (no model_config) — confirm whether that is intended.
    volume: str = Field(..., min_length=1)
    subdir: Optional[str] = None
    btrfs_snapshots: BtrfsSnapshotConfig = Field(
        default_factory=lambda: BtrfsSnapshotConfig()
    )
    hard_link_snapshots: HardLinkSnapshotConfig = Field(
        default_factory=lambda: HardLinkSnapshotConfig()
    )

    @model_validator(mode="after")
    def validate_snapshot_exclusivity(
        self,
    ) -> SyncEndpoint:
        """Reject configs enabling both snapshot mechanisms."""
        if self.btrfs_snapshots.enabled and self.hard_link_snapshots.enabled:
            raise ValueError(
                "btrfs-snapshots and hard-link-snapshots"
                " are mutually exclusive"
            )
        return self

    @property
    def snapshot_mode(
        self,
    ) -> Literal["none", "btrfs", "hard-link"]:
        """Collapse the two snapshot configs into a single mode label."""
        if self.btrfs_snapshots.enabled:
            return "btrfs"
        elif self.hard_link_snapshots.enabled:
            return "hard-link"
        else:
            return "none"


# Backwards-compatible alias — existing code that imports
# DestinationSyncEndpoint continues to work unchanged.
DestinationSyncEndpoint = SyncEndpoint


class RsyncOptions(_BaseModel):
    """Rsync flag configuration for a sync operation."""

    model_config = ConfigDict(frozen=True)
    # Consumed by sync.rsync.build_rsync_command (not visible here);
    # presumably map to rsync's --compress / --checksum — confirm there.
    compress: bool = False
    checksum: bool = True
    # default_options_override likely replaces the built-in option
    # list while extra_options are appended; the exact semantics
    # live in build_rsync_command — confirm there.
    default_options_override: Optional[List[str]] = None
    extra_options: List[str] = Field(default_factory=list)


class SyncConfig(_BaseModel):
    """Configuration for a single sync operation."""

    # NOTE(review): not frozen, unlike most sibling models —
    # confirm intentional.
    slug: Slug
    source: SyncEndpoint
    destination: SyncEndpoint
    enabled: bool = True
    rsync_options: RsyncOptions = Field(default_factory=lambda: RsyncOptions())
    filters: List[str] = Field(default_factory=list)
    filter_file: Optional[str] = None

    @field_validator("filters", mode="before")
    @classmethod
    def normalize_filters(cls, v: Any) -> list[str]:
        """Normalize filter entries to rsync filter-rule strings.

        Accepts either plain strings (passed through verbatim) or
        single-key dicts ``{"include": pat}`` / ``{"exclude": pat}``,
        which become ``"+ pat"`` / ``"- pat"`` rules.

        Raises:
            ValueError: for any other entry shape.
        """
        result: list[str] = []
        for item in v:
            match item:
                case str():
                    result.append(item)
                case {"include": str() as pattern}:
                    result.append(f"+ {pattern}")
                case {"exclude": str() as pattern}:
                    result.append(f"- {pattern}")
                case _:
                    raise ValueError(
                        f"Filter must be a string or a dict"
                        f" with 'include'/'exclude' key,"
                        f" got: {item!r}"
                    )
        return result


class EndpointFilter(_BaseModel):
    """Endpoint selection filter (not serialized)."""

    model_config = ConfigDict(frozen=True)
    locations: List[str] = Field(default_factory=list)
    network: Optional[Literal["private", "public"]] = None


class Config(_BaseModel):
    """Top-level NBKP configuration."""

    ssh_endpoints: Dict[str, SshEndpoint] = Field(default_factory=dict)

    @model_validator(mode="before")
    @classmethod
    def resolve_ssh_endpoint_extends(cls, data: Any) -> Any:
        """Resolve `extends` inheritance on ssh-endpoints.

        Runs before validation on the raw input mapping. Each
        endpoint dict that names a parent via ``extends`` is
        shallow-merged over that parent (child keys win; the
        ``extends`` key itself is dropped). Circular chains and
        references to unknown endpoints raise ValueError.

        Non-dict input (or a non-dict endpoints mapping) is
        returned unchanged so pydantic can report the type error.
        """
        if not isinstance(data, dict):
            return data
        # Accept both the kebab-case YAML spelling and the
        # snake_case attribute spelling of the key.
        endpoints = (
            data.get("ssh-endpoints") or data.get("ssh_endpoints") or {}
        )
        if not isinstance(endpoints, dict):
            return data

        # Memo of already-resolved entries; doubles as the result map.
        resolved: dict[str, Any] = {}

        def _resolve(slug: str, chain: list[str]) -> Any:
            # `chain` is the path of slugs currently being resolved,
            # used to detect circular extends references.
            if slug in resolved:
                return resolved[slug]
            ep = endpoints[slug]
            if not isinstance(ep, dict):
                # Leave malformed entries as-is for pydantic to reject.
                resolved[slug] = ep
                return ep
            parent_slug = ep.get("extends")
            if parent_slug is None:
                resolved[slug] = ep
                return ep
            if parent_slug in chain:
                chain_str = " -> ".join(chain + [parent_slug])
                raise ValueError(f"Circular extends chain: {chain_str}")
            if parent_slug not in endpoints:
                raise ValueError(
                    f"Endpoint '{slug}' extends "
                    f"unknown endpoint '{parent_slug}'"
                )
            parent = _resolve(parent_slug, chain + [slug])
            if not isinstance(parent, dict):
                resolved[slug] = ep
                return ep
            # Shallow merge: child keys override parent keys; the
            # bookkeeping `extends` key is dropped from the result.
            merged = {
                **parent,
                **{k: v for k, v in ep.items() if k != "extends"},
            }
            # If child sets proxy-jump or proxy-jumps, remove
            # the other to avoid exclusivity clash with parent
            proxy_keys = {"proxy-jump", "proxy-jumps"}
            child_proxy_keys = proxy_keys & set(ep.keys())
            if child_proxy_keys:
                for k in proxy_keys - child_proxy_keys:
                    merged.pop(k, None)
            # Same for location / locations
            loc_keys = {"location", "locations"}
            child_loc_keys = loc_keys & set(ep.keys())
            if child_loc_keys:
                for k in loc_keys - child_loc_keys:
                    merged.pop(k, None)
            resolved[slug] = merged
            return merged

        for slug in endpoints:
            _resolve(slug, [])

        # Shallow-copy the top-level mapping before swapping in the
        # resolved endpoints, preserving the caller's key spelling.
        data = {**data}
        if "ssh-endpoints" in data:
            data["ssh-endpoints"] = resolved
        else:
            data["ssh_endpoints"] = resolved
        return data

    @field_validator("ssh_endpoints", mode="before")
    @classmethod
    def inject_ssh_endpoint_slugs(cls, v: Any, info: ValidationInfo) -> Any:
        """Copy each dict key into its entry as the ``slug`` field.

        An entry that already carries an explicit ``slug`` (or is
        not a dict) is passed through unchanged.
        """
        return {
            slug: (
                {**data, "slug": slug}
                if isinstance(data, dict) and "slug" not in data
                else data
            )
            for slug, data in v.items()
        }

    volumes: Dict[str, Volume] = Field(default_factory=dict)

    # The volume slug is the key in the volumes dict,
    # but we also want it as a field in the Volume objects.
+ @field_validator("volumes", mode="before") + @classmethod + def inject_volume_slugs(cls, v: Any, info: ValidationInfo) -> Any: + return { + slug: ( + {**data, "slug": slug} + if isinstance(data, dict) and "slug" not in data + else data + ) + for slug, data in v.items() + } + + syncs: Dict[str, SyncConfig] = Field(default_factory=dict) + + # The sync slug is the key in the syncs dict, + # but we also want it as a field in the SyncConfig objects. + @field_validator("syncs", mode="before") + @classmethod + def inject_sync_slugs(cls, v: Any, info: ValidationInfo) -> Any: + return { + slug: ( + {**data, "slug": slug} + if isinstance(data, dict) and "slug" not in data + else data + ) + for slug, data in v.items() + } + + def resolve_endpoint_for_volume( + self, + vol: RemoteVolume, + endpoint_filter: EndpointFilter | None = None, + ) -> SshEndpoint: + """Select the best SSH endpoint for a remote volume. + + Uses ``endpoint_filter`` (location, network) to narrow + candidates. Falls back to the primary ``ssh_endpoint``. 
+ """ + from ..remote.resolution import is_private_host + + candidates = ( + list(vol.ssh_endpoints) + if vol.ssh_endpoints + else [vol.ssh_endpoint] + ) + + ef = endpoint_filter + if ef is None: + return self.ssh_endpoints[candidates[0]] + + # DNS reachability: drop endpoints whose host + # cannot be resolved + reachable = [ + slug + for slug in candidates + if is_private_host(self.ssh_endpoints[slug].host) is not None + ] + if not reachable: + return self.ssh_endpoints[vol.ssh_endpoint] + + # Location filter + if ef.locations: + filter_locs = set(ef.locations) + by_loc = [ + slug + for slug in reachable + if filter_locs & set(self.ssh_endpoints[slug].location_list) + ] + if by_loc: + reachable = by_loc + + # Network filter (private / public) + if ef.network is not None: + want_private = ef.network == "private" + by_net = [ + slug + for slug in reachable + if is_private_host(self.ssh_endpoints[slug].host) + == want_private + ] + if by_net: + reachable = by_net + + # Deterministic pick: first candidate in original order + return self.ssh_endpoints[reachable[0]] + + def resolve_proxy_chain(self, server: SshEndpoint) -> list[SshEndpoint]: + """Resolve the proxy-jump chain as a list of SshEndpoints.""" + return [self.ssh_endpoints[slug] for slug in server.proxy_jump_chain] + + @model_validator(mode="after") + def validate_cross_references(self) -> Config: + for slug, server in self.ssh_endpoints.items(): + chain = server.proxy_jump_chain + for hop in chain: + if hop not in self.ssh_endpoints: + raise ValueError( + f"Server '{slug}' references " + f"unknown proxy-jump server " + f"'{hop}'" + ) + # Circular detection via BFS through transitive + # proxy chains + visited: set[str] = {slug} + queue = list(chain) + while queue: + current = queue.pop(0) + if current in visited: + raise ValueError( + f"Circular proxy-jump chain " + f"detected starting from " + f"server '{slug}'" + ) + visited.add(current) + queue.extend(self.ssh_endpoints[current].proxy_jump_chain) + + for 
vol_slug, vol in self.volumes.items(): + match vol: + case RemoteVolume(): + if vol.ssh_endpoint not in self.ssh_endpoints: + ref = vol.ssh_endpoint + raise ValueError( + f"Volume '{vol_slug}' references " + f"unknown ssh-endpoint '{ref}'" + ) + if vol.ssh_endpoints is not None: + for ep_ref in vol.ssh_endpoints: + if ep_ref not in self.ssh_endpoints: + raise ValueError( + f"Volume '{vol_slug}'" + f" references unknown" + f" ssh-endpoint" + f" '{ep_ref}'" + ) + for sync_slug, sync in self.syncs.items(): + if sync.source.volume not in self.volumes: + src = sync.source.volume + raise ValueError( + f"Sync '{sync_slug}' references " + f"unknown source volume '{src}'" + ) + if sync.destination.volume not in self.volumes: + dst = sync.destination.volume + raise ValueError( + f"Sync '{sync_slug}' references " + f"unknown destination volume '{dst}'" + ) + src_vol = self.volumes[sync.source.volume] + dst_vol = self.volumes[sync.destination.volume] + if ( + isinstance(src_vol, RemoteVolume) + and isinstance(dst_vol, RemoteVolume) + and src_vol.ssh_endpoint != dst_vol.ssh_endpoint + ): + raise ValueError( + f"Sync '{sync_slug}' has source on" + f" '{src_vol.ssh_endpoint}' and" + f" destination on" + f" '{dst_vol.ssh_endpoint}'." + f" Cross-server remote-to-remote" + f" syncs are not supported." + f" Use two separate syncs through" + f" the local machine instead." 
+ ) + return self diff --git a/nbkp/config/resolution.py b/nbkp/config/resolution.py new file mode 100644 index 0000000..6be922e --- /dev/null +++ b/nbkp/config/resolution.py @@ -0,0 +1,50 @@ +"""Endpoint resolution: resolve SSH endpoints once per command.""" + +from __future__ import annotations + +from pydantic import ConfigDict + +from pydantic import Field + +from .protocol import ( + Config, + EndpointFilter, + RemoteVolume, + SshEndpoint, + _BaseModel, +) + + +class ResolvedEndpoint(_BaseModel): + """Pre-resolved SSH endpoint with proxy chain.""" + + model_config = ConfigDict(frozen=True) + server: SshEndpoint + proxy_chain: list[SshEndpoint] = Field(default_factory=list) + + +ResolvedEndpoints = dict[str, ResolvedEndpoint] + + +def resolve_all_endpoints( + config: Config, + endpoint_filter: EndpointFilter | None = None, +) -> ResolvedEndpoints: + """Resolve SSH endpoints for all remote volumes. + + Returns a mapping from volume slug to ResolvedEndpoint. + Local volumes are not included in the result. 
+ """ + result: dict[str, ResolvedEndpoint] = {} + for vol in config.volumes.values(): + match vol: + case RemoteVolume(): + server = config.resolve_endpoint_for_volume( + vol, endpoint_filter + ) + proxy_chain = config.resolve_proxy_chain(server) + result[vol.slug] = ResolvedEndpoint( + server=server, + proxy_chain=proxy_chain, + ) + return result diff --git a/nbkp/output.py b/nbkp/output.py new file mode 100644 index 0000000..914d803 --- /dev/null +++ b/nbkp/output.py @@ -0,0 +1,845 @@ +"""CLI output formatting.""" + +from __future__ import annotations + +import enum +import shlex + +from pydantic import ValidationError +from rich.console import Console, Group, RenderableType +from rich.padding import Padding +from rich.panel import Panel +from rich.syntax import Syntax +from rich.table import Table +from rich.text import Text + +from .config import ( + Config, + ConfigError, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from .sync import PruneResult, SyncResult +from .sync.rsync import build_rsync_command +from .check import SyncReason, SyncStatus, VolumeReason, VolumeStatus +from .remote.ssh import format_proxy_jump_chain + + +class OutputFormat(str, enum.Enum): + """Output format for CLI commands.""" + + HUMAN = "human" + JSON = "json" + + +def _status_text( + active: bool, + reasons: list[VolumeReason] | list[SyncReason], +) -> Text: + """Format status with optional reasons as styled text.""" + if active: + return Text("active", style="green") + else: + reason_str = ", ".join(r.value for r in reasons) + return Text(f"inactive ({reason_str})", style="red") + + +def _sync_options(sync: SyncConfig) -> str: + """Build a comma-separated string of enabled sync options.""" + opts: list[str] = [] + if sync.filters or sync.filter_file: + opts.append("rsync-filter") + if sync.source.snapshot_mode != "none": + opts.append(f"src:{sync.source.snapshot_mode}") + if 
sync.destination.btrfs_snapshots.enabled: + btrfs_label = "btrfs-snapshots" + max_snap = sync.destination.btrfs_snapshots.max_snapshots + if max_snap is not None: + btrfs_label += f"(max:{max_snap})" + opts.append(btrfs_label) + if sync.destination.hard_link_snapshots.enabled: + hl_label = "hard-link-snapshots" + max_snap = sync.destination.hard_link_snapshots.max_snapshots + if max_snap is not None: + hl_label += f"(max:{max_snap})" + opts.append(hl_label) + return ", ".join(opts) + + +def format_volume_display( + vol: LocalVolume | RemoteVolume, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Format a volume for human display.""" + match vol: + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + if ep.server.user: + host_part = f"{ep.server.user}@{ep.server.host}" + else: + host_part = ep.server.host + if ep.server.port != 22: + host_part += f":{ep.server.port}" + return f"{host_part}:{vol.path}" + case LocalVolume(): + return vol.path + + +def build_check_sections( + vol_statuses: dict[str, VolumeStatus], + sync_statuses: dict[str, SyncStatus], + config: Config, + resolved_endpoints: ResolvedEndpoints, +) -> list[RenderableType]: + """Build renderable sections for check output.""" + sections: list[RenderableType] = [] + + if config.ssh_endpoints: + ep_table = Table(title="SSH Endpoints:") + ep_table.add_column("Name", style="bold") + ep_table.add_column("Host") + ep_table.add_column("Port") + ep_table.add_column("User") + ep_table.add_column("Key") + ep_table.add_column("Proxy Jump") + ep_table.add_column("Locations") + + for server in config.ssh_endpoints.values(): + ep_table.add_row( + server.slug, + server.host, + str(server.port), + server.user or "", + server.key or "", + ", ".join(server.proxy_jump_chain) or "", + ", ".join(server.location_list), + ) + + sections.append(ep_table) + sections.append(Text("")) + + vol_table = Table(title="Volumes:") + vol_table.add_column("Name", style="bold") + vol_table.add_column("Type") + 
vol_table.add_column("SSH Endpoint") + vol_table.add_column("URI") + vol_table.add_column("Status") + + for vs in vol_statuses.values(): + vol = vs.config + match vol: + case RemoteVolume(): + vol_type = "remote" + ep = resolved_endpoints.get(vol.slug) + ssh_ep = ep.server.slug if ep else vol.ssh_endpoint + case LocalVolume(): + vol_type = "local" + ssh_ep = "" + vol_table.add_row( + vs.slug, + vol_type, + ssh_ep, + format_volume_display(vol, resolved_endpoints), + _status_text(vs.active, vs.reasons), + ) + + sections.append(vol_table) + sections.append(Text("")) + + sync_table = Table(title="Syncs:") + sync_table.add_column("Name", style="bold") + sync_table.add_column("Source") + sync_table.add_column("Destination") + sync_table.add_column("Options") + sync_table.add_column("Status") + + for ss in sync_statuses.values(): + sync_table.add_row( + ss.slug, + _sync_endpoint_display(ss.config.source), + _sync_endpoint_display(ss.config.destination), + _sync_options(ss.config), + _status_text(ss.active, ss.reasons), + ) + + sections.append(sync_table) + + active_syncs = [ss for ss in sync_statuses.values() if ss.active] + if active_syncs: + sections.append(Text("")) + cmd_table = Table(title="Rsync Commands:") + cmd_table.add_column("Sync", style="bold") + cmd_table.add_column("Command") + + for ss in active_syncs: + cmd = build_rsync_command( + ss.config, + config, + resolved_endpoints=resolved_endpoints, + ) + cmd_table.add_row(ss.slug, shlex.join(cmd)) + + sections.append(cmd_table) + + return sections + + +def print_human_check( + vol_statuses: dict[str, VolumeStatus], + sync_statuses: dict[str, SyncStatus], + config: Config, + *, + console: Console | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, + wrap_in_panel: bool = True, +) -> None: + """Print human-readable status output.""" + re = resolved_endpoints or {} + if console is None: + console = Console() + + sections = build_check_sections(vol_statuses, sync_statuses, config, re) + + if 
wrap_in_panel: + console.print( + Panel( + Group(*sections), + title="[bold]Check Results[/bold]", + border_style="cyan", + padding=(0, 1), + ) + ) + else: + for section in sections: + console.print(section) + + +def print_human_results( + results: list[SyncResult], + dry_run: bool, + *, + console: Console | None = None, +) -> None: + """Print human-readable run results.""" + if console is None: + console = Console() + mode = " (dry run)" if dry_run else "" + + table = Table( + title=f"Sync results{mode}:", + ) + table.add_column("Name", style="bold") + table.add_column("Status") + table.add_column("Details") + + for r in results: + if r.success: + status = Text("OK", style="green") + else: + status = Text("FAILED", style="red") + + details_parts: list[str] = [] + if r.error: + details_parts.append(f"Error: {r.error}") + if r.snapshot_path: + details_parts.append(f"Snapshot: {r.snapshot_path}") + if r.pruned_paths: + details_parts.append(f"Pruned: {len(r.pruned_paths)} snapshot(s)") + if r.output and not r.success: + lines = r.output.strip().split("\n")[:5] + details_parts.extend(lines) + + table.add_row( + r.sync_slug, + status, + "\n".join(details_parts), + ) + + console.print(table) + + +def print_human_prune_results( + results: list[PruneResult], + dry_run: bool, + *, + console: Console | None = None, +) -> None: + """Print human-readable prune results.""" + if console is None: + console = Console() + mode = " (dry run)" if dry_run else "" + + table = Table( + title=f"NBKP prune{mode}:", + ) + table.add_column("Name", style="bold") + table.add_column("Deleted") + table.add_column("Kept") + table.add_column("Status") + + for r in results: + if r.error: + status = Text("FAILED", style="red") + else: + status = Text("OK", style="green") + + table.add_row( + r.sync_slug, + str(len(r.deleted)), + str(r.kept), + status, + ) + + console.print(table) + + +def _ssh_prefix( + server: SshEndpoint, + proxy_chain: list[SshEndpoint] | None = None, +) -> str: + """Build 
human-friendly SSH command prefix.""" + parts = ["ssh"] + if server.port != 22: + parts.extend(["-p", str(server.port)]) + if server.key: + parts.extend(["-i", server.key]) + if proxy_chain: + parts.extend(["-J", format_proxy_jump_chain(proxy_chain)]) + host = f"{server.user}@{server.host}" if server.user else server.host + parts.append(host) + return " ".join(parts) + + +def _wrap_cmd( + cmd: str, + vol: LocalVolume | RemoteVolume, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Wrap a shell command for remote execution.""" + match vol: + case LocalVolume(): + return cmd + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + prefix = _ssh_prefix(ep.server, ep.proxy_chain) + return f"{prefix} '{cmd}'" + + +def _endpoint_path( + vol: LocalVolume | RemoteVolume, + subdir: str | None, +) -> str: + """Resolve the full endpoint path.""" + if subdir: + return f"{vol.path}/{subdir}" + else: + return vol.path + + +def _host_label( + vol: LocalVolume | RemoteVolume, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Human-readable host label for a volume.""" + match vol: + case LocalVolume(): + return "this machine" + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + return ep.server.host + + +_INDENT = " " + +_RSYNC_INSTALL = ( + "Ubuntu/Debian: sudo apt install rsync\n" + "Fedora/RHEL: sudo dnf install rsync\n" + "macOS: brew install rsync" +) + +_BTRFS_INSTALL = ( + "Ubuntu/Debian: sudo apt install btrfs-progs\n" + "Fedora/RHEL: sudo dnf install btrfs-progs" +) + +_COREUTILS_INSTALL = ( + "Ubuntu/Debian: sudo apt install coreutils\n" + "Fedora/RHEL: sudo dnf install coreutils" +) + +_UTIL_LINUX_INSTALL = ( + "Ubuntu/Debian: sudo apt install util-linux\n" + "Fedora/RHEL: sudo dnf install util-linux" +) + + +def _print_cmd( + console: Console, + cmd: str, + indent: int = 2, +) -> None: + """Print a shell command with bash syntax highlighting. + + ``indent`` is the number of ``_INDENT`` levels (each 2 spaces). 
+ """ + syntax = Syntax( + cmd, + "bash", + theme="monokai", + background_color="default", + ) + pad = len(_INDENT) * indent + console.print(Padding(syntax, (0, 0, 0, pad))) + + +def _print_sentinel_fix( + console: Console, + vol: LocalVolume | RemoteVolume, + path: str, + sentinel: str, + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Print sentinel creation fix with mount reminder.""" + p2 = _INDENT * 2 + console.print(f"{p2}Ensure the volume is mounted, then:") + _print_cmd( + console, + _wrap_cmd(f"mkdir -p {path}", vol, resolved_endpoints), + ) + _print_cmd( + console, + _wrap_cmd( + f"touch {path}/{sentinel}", + vol, + resolved_endpoints, + ), + ) + + +def _print_ssh_troubleshoot( + console: Console, + server: SshEndpoint, + proxy_chain: list[SshEndpoint] | None = None, +) -> None: + """Print SSH connectivity troubleshooting instructions.""" + p2 = _INDENT * 2 + p3 = _INDENT * 3 + ssh_cmd = _ssh_prefix(server, proxy_chain) + port_flag = f"-p {server.port} " if server.port != 22 else "" + proxy_opt = "" + if proxy_chain: + jump_str = format_proxy_jump_chain(proxy_chain) + proxy_opt = f"-o ProxyJump={jump_str} " + user_host = f"{server.user}@{server.host}" if server.user else server.host + console.print(f"{p2}Server {server.host} is unreachable.") + console.print(f"{p2}Verify connectivity:") + _print_cmd(console, f"{ssh_cmd} echo ok", indent=3) + console.print(f"{p2}If authentication fails:") + if server.key: + console.print(f"{p3}1. Ensure the key exists:") + _print_cmd(console, f"ls -l {server.key}", indent=4) + console.print(f"{p3}2. Copy it to the server:") + _print_cmd( + console, + f"ssh-copy-id {proxy_opt}{port_flag}" + f"-i {server.key} {user_host}", + indent=4, + ) + else: + console.print(f"{p3}1. Generate a key:") + _print_cmd(console, "ssh-keygen -t ed25519", indent=4) + console.print(f"{p3}2. Copy it to the server:") + _print_cmd( + console, + f"ssh-copy-id {proxy_opt}{port_flag}" f"{user_host}", + indent=4, + ) + console.print(f"{p3}3. 
Verify passwordless login:") + _print_cmd(console, f"{ssh_cmd} echo ok", indent=4) + + +def _print_sync_reason_fix( + console: Console, + sync: SyncConfig, + reason: SyncReason, + config: Config, + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Print fix instructions for a sync reason.""" + p2 = _INDENT * 2 + match reason: + case SyncReason.DISABLED: + console.print(f"{p2}Enable the sync in the" " configuration file.") + case SyncReason.SOURCE_UNAVAILABLE: + src = config.volumes[sync.source.volume] + match src: + case RemoteVolume(): + ep = resolved_endpoints[src.slug] + _print_ssh_troubleshoot( + console, + ep.server, + ep.proxy_chain, + ) + case LocalVolume(): + console.print( + f"{p2}Source volume" + f" '{sync.source.volume}'" + " is not available." + ) + case SyncReason.DESTINATION_UNAVAILABLE: + dst = config.volumes[sync.destination.volume] + match dst: + case RemoteVolume(): + ep = resolved_endpoints[dst.slug] + _print_ssh_troubleshoot( + console, + ep.server, + ep.proxy_chain, + ) + case LocalVolume(): + console.print( + f"{p2}Destination volume" + f" '{sync.destination.volume}'" + " is not available." + ) + case SyncReason.SOURCE_SENTINEL_NOT_FOUND: + src = config.volumes[sync.source.volume] + path = _endpoint_path(src, sync.source.subdir) + _print_sentinel_fix( + console, + src, + path, + ".nbkp-src", + resolved_endpoints, + ) + case SyncReason.SOURCE_LATEST_NOT_FOUND: + src = config.volumes[sync.source.volume] + path = _endpoint_path(src, sync.source.subdir) + console.print( + f"{p2}Source has snapshots enabled" + f" but {path}/latest does not" + " exist. 
Ensure the upstream" + " sync has run at least once," + " or create it manually:" + ) + if sync.source.btrfs_snapshots.enabled: + cmds = [ + "sudo btrfs subvolume create" f" {path}/latest", + "sudo chown :" f" {path}/latest", + ] + else: + cmds = [ + f"mkdir -p {path}/snapshots/initial", + f"ln -sfn snapshots/initial" f" {path}/latest", + ] + for cmd in cmds: + _print_cmd( + console, + _wrap_cmd(cmd, src, resolved_endpoints), + ) + case SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND: + src = config.volumes[sync.source.volume] + path = _endpoint_path(src, sync.source.subdir) + if sync.source.btrfs_snapshots.enabled: + cmds = [ + f"sudo mkdir {path}/snapshots", + "sudo chown :" f" {path}/snapshots", + ] + else: + cmds = [f"mkdir -p {path}/snapshots"] + for cmd in cmds: + _print_cmd( + console, + _wrap_cmd(cmd, src, resolved_endpoints), + ) + case SyncReason.DESTINATION_SENTINEL_NOT_FOUND: + dst = config.volumes[sync.destination.volume] + path = _endpoint_path(dst, sync.destination.subdir) + _print_sentinel_fix( + console, + dst, + path, + ".nbkp-dst", + resolved_endpoints, + ) + case SyncReason.RSYNC_NOT_FOUND_ON_SOURCE: + src = config.volumes[sync.source.volume] + host = _host_label(src, resolved_endpoints) + console.print(f"{p2}Install rsync on {host}:") + _print_cmd(console, _RSYNC_INSTALL, indent=3) + case SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION: + dst = config.volumes[sync.destination.volume] + host = _host_label(dst, resolved_endpoints) + console.print(f"{p2}Install rsync on {host}:") + _print_cmd(console, _RSYNC_INSTALL, indent=3) + case SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION: + dst = config.volumes[sync.destination.volume] + host = _host_label(dst, resolved_endpoints) + console.print(f"{p2}Install btrfs-progs on {host}:") + _print_cmd(console, _BTRFS_INSTALL, indent=3) + case SyncReason.STAT_NOT_FOUND_ON_DESTINATION: + dst = config.volumes[sync.destination.volume] + host = _host_label(dst, resolved_endpoints) + console.print(f"{p2}Install coreutils (stat)" f" 
on {host}:") + _print_cmd(console, _COREUTILS_INSTALL, indent=3) + case SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION: + dst = config.volumes[sync.destination.volume] + host = _host_label(dst, resolved_endpoints) + console.print(f"{p2}Install util-linux (findmnt)" f" on {host}:") + _print_cmd(console, _UTIL_LINUX_INSTALL, indent=3) + case SyncReason.DESTINATION_NOT_BTRFS: + console.print( + f"{p2}The destination is not on" " a btrfs filesystem." + ) + case SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME: + dst = config.volumes[sync.destination.volume] + path = _endpoint_path(dst, sync.destination.subdir) + cmds = [ + f"sudo btrfs subvolume create {path}/latest", + f"sudo mkdir {path}/snapshots", + "sudo chown :" f" {path}/latest {path}/snapshots", + ] + for cmd in cmds: + _print_cmd( + console, + _wrap_cmd(cmd, dst, resolved_endpoints), + ) + case SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM: + dst = config.volumes[sync.destination.volume] + console.print( + f"{p2}Remount the btrfs volume" " with user_subvol_rm_allowed:" + ) + cmd = ( + "sudo mount -o" + " remount,user_subvol_rm_allowed" + f" {dst.path}" + ) + _print_cmd( + console, + _wrap_cmd(cmd, dst, resolved_endpoints), + ) + console.print( + f"{p2}To persist, add" + " user_subvol_rm_allowed to" + " the mount options in /etc/fstab" + f" for {dst.path}." 
+ ) + case SyncReason.DESTINATION_LATEST_NOT_FOUND: + dst = config.volumes[sync.destination.volume] + path = _endpoint_path(dst, sync.destination.subdir) + cmds = [ + f"sudo btrfs subvolume create {path}/latest", + "sudo chown :" f" {path}/latest", + ] + for cmd in cmds: + _print_cmd( + console, + _wrap_cmd(cmd, dst, resolved_endpoints), + ) + case SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND: + dst = config.volumes[sync.destination.volume] + path = _endpoint_path(dst, sync.destination.subdir) + if sync.destination.hard_link_snapshots.enabled: + cmds = [f"mkdir -p {path}/snapshots"] + else: + cmds = [ + f"sudo mkdir {path}/snapshots", + "sudo chown :" f" {path}/snapshots", + ] + for cmd in cmds: + _print_cmd( + console, + _wrap_cmd(cmd, dst, resolved_endpoints), + ) + case SyncReason.DESTINATION_NO_HARDLINK_SUPPORT: + console.print( + f"{p2}The destination filesystem does not" + " support hard links (e.g. FAT/exFAT)." + " Use a filesystem like ext4, xfs, or" + " btrfs, or use btrfs-snapshots instead." 
+ ) + + +def print_human_troubleshoot( + vol_statuses: dict[str, VolumeStatus], + sync_statuses: dict[str, SyncStatus], + config: Config, + *, + console: Console | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> None: + """Print troubleshooting instructions.""" + re = resolved_endpoints or {} + if console is None: + console = Console() + has_issues = False + + failed_vols = [vs for vs in vol_statuses.values() if vs.reasons] + failed_syncs = [ss for ss in sync_statuses.values() if ss.reasons] + has_issues = bool(failed_vols or failed_syncs) + + for vs in failed_vols: + console.print(f"\n[bold]Volume {vs.slug!r}:[/bold]") + vol = vs.config + for reason in vs.reasons: + console.print(f"{_INDENT}{reason.value}") + match reason: + case VolumeReason.SENTINEL_NOT_FOUND: + _print_sentinel_fix( + console, + vol, + vol.path, + ".nbkp-vol", + re, + ) + case VolumeReason.UNREACHABLE: + match vol: + case RemoteVolume(): + ep = re[vol.slug] + _print_ssh_troubleshoot( + console, + ep.server, + ep.proxy_chain, + ) + + for ss in failed_syncs: + console.print(f"\n[bold]Sync {ss.slug!r}:[/bold]") + for sync_reason in ss.reasons: + console.print(f"{_INDENT}{sync_reason.value}") + _print_sync_reason_fix( + console, + ss.config, + sync_reason, + config, + re, + ) + + if not has_issues: + console.print("No issues found." 
" All volumes and syncs are active.") + + +def _sync_endpoint_display( + endpoint: SyncEndpoint | DestinationSyncEndpoint, +) -> str: + """Format a sync endpoint as volume or volume/subdir.""" + if endpoint.subdir: + return f"{endpoint.volume}:/{endpoint.subdir}" + else: + return endpoint.volume + + +def print_human_config( + config: Config, + *, + console: Console | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> None: + """Print human-readable configuration.""" + re = resolved_endpoints or {} + if console is None: + console = Console() + + if config.ssh_endpoints: + server_table = Table(title="SSH Endpoints:") + server_table.add_column("Name", style="bold") + server_table.add_column("Host") + server_table.add_column("Port") + server_table.add_column("User") + server_table.add_column("Key") + server_table.add_column("Proxy Jump") + server_table.add_column("Locations") + + for server in config.ssh_endpoints.values(): + server_table.add_row( + server.slug, + server.host, + str(server.port), + server.user or "", + server.key or "", + ", ".join(server.proxy_jump_chain) or "", + ", ".join(server.location_list), + ) + + console.print(server_table) + console.print() + + vol_table = Table(title="Volumes:") + vol_table.add_column("Name", style="bold") + vol_table.add_column("Type") + vol_table.add_column("SSH Endpoint") + vol_table.add_column("URI") + + for vol in config.volumes.values(): + match vol: + case RemoteVolume(): + vol_type = "remote" + ep = re.get(vol.slug) + ssh_ep = ep.server.slug if ep else vol.ssh_endpoint + case LocalVolume(): + vol_type = "local" + ssh_ep = "" + vol_table.add_row( + vol.slug, + vol_type, + ssh_ep, + format_volume_display(vol, re), + ) + + console.print(vol_table) + console.print() + + sync_table = Table(title="Syncs:") + sync_table.add_column("Name", style="bold") + sync_table.add_column("Source") + sync_table.add_column("Destination") + sync_table.add_column("Options") + sync_table.add_column("Enabled") + + for 
sync in config.syncs.values(): + enabled = ( + Text("yes", style="green") + if sync.enabled + else Text("no", style="red") + ) + sync_table.add_row( + sync.slug, + _sync_endpoint_display(sync.source), + _sync_endpoint_display(sync.destination), + _sync_options(sync), + enabled, + ) + + console.print(sync_table) + + +def print_config_error( + e: ConfigError, + *, + console: Console | None = None, +) -> None: + """Print a ConfigError as a Rich panel to stderr.""" + if console is None: + console = Console(stderr=True) + cause = e.__cause__ + match cause: + case ValidationError(): + lines: list[str] = [] + for err in cause.errors(): + loc = " → ".join(str(p) for p in err["loc"]) + msg = err["msg"] + if msg.startswith("Value error, "): + prefix_len = len("Value error, ") + msg = msg[prefix_len:] + if loc: + lines.append(f"{loc}: {msg}") + else: + lines.append(msg) + body = "\n".join(lines) + case _: + body = str(e) + console.print(Panel(body, title="Config error", style="red")) diff --git a/nbkp/remote/__init__.py b/nbkp/remote/__init__.py new file mode 100644 index 0000000..f010c1c --- /dev/null +++ b/nbkp/remote/__init__.py @@ -0,0 +1,15 @@ +"""Remote command execution and SSH utilities.""" + +from .fabricssh import ( + build_ssh_base_args, + build_ssh_e_option, + format_remote_path, + run_remote_command, +) + +__all__ = [ + "build_ssh_base_args", + "build_ssh_e_option", + "format_remote_path", + "run_remote_command", +] diff --git a/nbkp/remote/fabricssh.py b/nbkp/remote/fabricssh.py new file mode 100644 index 0000000..fa3188c --- /dev/null +++ b/nbkp/remote/fabricssh.py @@ -0,0 +1,84 @@ +"""Fabric-based remote command execution.""" + +from __future__ import annotations + +import shlex +import subprocess + +import paramiko +from fabric import Connection # type: ignore[import-untyped] + +from ..config import SshEndpoint +from .ssh import build_ssh_base_args as build_ssh_base_args # noqa: F401 +from .ssh import build_ssh_e_option as build_ssh_e_option # noqa: F401 
+from .ssh import format_remote_path as format_remote_path # noqa: F401 + + +def _build_single_connection( + server: SshEndpoint, + gateway: Connection | None = None, +) -> Connection: + """Build a single Fabric Connection with optional gateway.""" + opts = server.connection_options + connect_kwargs: dict[str, object] = { + "allow_agent": opts.allow_agent, + "look_for_keys": opts.look_for_keys, + "compress": opts.compress, + } + if opts.banner_timeout is not None: + connect_kwargs["banner_timeout"] = opts.banner_timeout + if opts.auth_timeout is not None: + connect_kwargs["auth_timeout"] = opts.auth_timeout + if opts.channel_timeout is not None: + connect_kwargs["channel_timeout"] = opts.channel_timeout + if opts.disabled_algorithms is not None: + connect_kwargs["disabled_algorithms"] = opts.disabled_algorithms + if server.key: + connect_kwargs["key_filename"] = server.key + + conn = Connection( + host=server.host, + port=server.port, + user=server.user, + connect_kwargs=connect_kwargs, + connect_timeout=opts.connect_timeout, + forward_agent=opts.forward_agent, + gateway=gateway, + ) + + if not opts.strict_host_key_checking: + conn.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + return conn + + +def _build_connection( + server: SshEndpoint, + proxy_chain: list[SshEndpoint] | None = None, +) -> Connection: + """Build a Fabric Connection with optional proxy chain.""" + gateway: Connection | None = None + for proxy in proxy_chain or []: + gateway = _build_single_connection(proxy, gateway) + return _build_single_connection(server, gateway) + + +def run_remote_command( + server: SshEndpoint, + command: list[str], + proxy_chain: list[SshEndpoint] | None = None, +) -> subprocess.CompletedProcess[str]: + """Run a command on a remote host via Fabric.""" + cmd_string = " ".join(shlex.quote(arg) for arg in command) + with _build_connection(server, proxy_chain) as conn: + if server.connection_options.server_alive_interval is not None: + 
conn.transport.set_keepalive( + server.connection_options.server_alive_interval + ) + result = conn.run(cmd_string, warn=True, hide=True, in_stream=False) + return subprocess.CompletedProcess( + args=cmd_string, + returncode=result.exited, + stdout=result.stdout, + stderr=result.stderr, + ) diff --git a/nbkp/remote/resolution.py b/nbkp/remote/resolution.py new file mode 100644 index 0000000..cf39b53 --- /dev/null +++ b/nbkp/remote/resolution.py @@ -0,0 +1,61 @@ +"""SSH host resolution and network classification helpers.""" + +from __future__ import annotations + +import ipaddress +import socket +from pathlib import Path + +import paramiko + + +def _load_ssh_config() -> paramiko.SSHConfig | None: + """Load the user's SSH config if it exists.""" + config_path = Path.home() / ".ssh" / "config" + if config_path.exists(): + return paramiko.SSHConfig.from_path(str(config_path)) + else: + return None + + +def resolve_hostname(hostname: str) -> str: + """Resolve an SSH hostname through ~/.ssh/config. + + If the hostname is defined in SSH config (via HostName), + returns the resolved hostname. Otherwise returns the + original hostname unchanged. + """ + ssh_config = _load_ssh_config() + if ssh_config is not None: + result = ssh_config.lookup(hostname) + return result.get("hostname", hostname) + else: + return hostname + + +def resolve_host(hostname: str) -> set[str] | None: + """Resolve hostname to IP addresses. + + First resolves through SSH config, then via DNS. + Returns None if the hostname cannot be resolved. + """ + real_host = resolve_hostname(hostname) + try: + results = socket.getaddrinfo(real_host, None) + return {str(r[4][0]) for r in results} + except socket.gaierror: + return None + + +def is_private_host(hostname: str) -> bool | None: + """Check whether hostname resolves to private addresses. + + Returns True if all resolved addresses are private, + False if any is public, or None if the hostname + cannot be resolved. 
+ """ + addrs = resolve_host(hostname) + if addrs is None: + return None + else: + return all(ipaddress.ip_address(a).is_private for a in addrs) diff --git a/nbkp/remote/ssh.py b/nbkp/remote/ssh.py new file mode 100644 index 0000000..d18bf5b --- /dev/null +++ b/nbkp/remote/ssh.py @@ -0,0 +1,151 @@ +"""SSH command building and remote command execution helpers.""" + +from __future__ import annotations + +import shlex +import subprocess + +from ..config import SshEndpoint, SshConnectionOptions + + +def _ssh_o_options(opts: SshConnectionOptions) -> list[str]: + """Derive SSH -o option values from structured options.""" + result = [ + f"ConnectTimeout={opts.connect_timeout}", + "BatchMode=yes", + ] + if opts.compress: + result.append("Compression=yes") + if opts.server_alive_interval is not None: + result.append(f"ServerAliveInterval=" f"{opts.server_alive_interval}") + if not opts.strict_host_key_checking: + result.append("StrictHostKeyChecking=no") + if opts.known_hosts_file is not None: + result.append(f"UserKnownHostsFile={opts.known_hosts_file}") + if opts.forward_agent: + result.append("ForwardAgent=yes") + return result + + +def format_proxy_jump_chain(proxies: list[SshEndpoint]) -> str: + """Format proxy chain as comma-separated [user@]host[:port] for -J.""" + parts: list[str] = [] + for proxy in proxies: + host = f"{proxy.user}@{proxy.host}" if proxy.user else proxy.host + if proxy.port != 22: + host += f":{proxy.port}" + parts.append(host) + return ",".join(parts) + + +def _build_proxy_command( + proxies: list[SshEndpoint], +) -> str: + """Build a nested ProxyCommand string for the proxy chain. + + Uses ProxyCommand instead of -J so that per-proxy SSH + options (e.g. StrictHostKeyChecking) are propagated to + each hop. 
+ """ + proxy = proxies[0] + parts: list[str] = ["ssh"] + for opt in _ssh_o_options(proxy.connection_options): + parts.extend(["-o", opt]) + if proxy.port != 22: + parts.extend(["-p", str(proxy.port)]) + if proxy.key: + parts.extend(["-i", proxy.key]) + parts.append("-W") + parts.append("%h:%p") + host = f"{proxy.user}@{proxy.host}" if proxy.user else proxy.host + parts.append(host) + + inner_cmd = " ".join(parts) + + for proxy in proxies[1:]: + escaped_inner = inner_cmd.replace("%", "%%") + parts = ["ssh"] + for opt in _ssh_o_options(proxy.connection_options): + parts.extend(["-o", opt]) + parts.extend(["-o", f"ProxyCommand={escaped_inner}"]) + if proxy.port != 22: + parts.extend(["-p", str(proxy.port)]) + if proxy.key: + parts.extend(["-i", proxy.key]) + parts.append("-W") + parts.append("%h:%p") + host = f"{proxy.user}@{proxy.host}" if proxy.user else proxy.host + parts.append(host) + inner_cmd = " ".join(parts) + + return inner_cmd + + +def build_ssh_base_args( + server: SshEndpoint, + proxy_chain: list[SshEndpoint] | None = None, +) -> list[str]: + """Build base SSH command args for a remote volume. 
+ + Returns args like: + ssh -o ConnectTimeout=10 -o BatchMode=yes [opts] host + """ + args = ["ssh"] + for opt in _ssh_o_options(server.connection_options): + args.extend(["-o", opt]) + if server.port != 22: + args.extend(["-p", str(server.port)]) + if server.key: + args.extend(["-i", server.key]) + if proxy_chain: + proxy_cmd = _build_proxy_command(proxy_chain) + args.extend(["-o", f"ProxyCommand={proxy_cmd}"]) + + host = f"{server.user}@{server.host}" if server.user else server.host + args.append(host) + return args + + +def run_remote_command( + server: SshEndpoint, + command: list[str], + proxy_chain: list[SshEndpoint] | None = None, +) -> subprocess.CompletedProcess[str]: + """Run a command on a remote host via SSH.""" + cmd_string = " ".join(shlex.quote(arg) for arg in command) + args = build_ssh_base_args(server, proxy_chain) + [cmd_string] + return subprocess.run( + args, + capture_output=True, + text=True, + ) + + +def build_ssh_e_option( + server: SshEndpoint, + proxy_chain: list[SshEndpoint] | None = None, +) -> list[str]: + """Build rsync's -e option for SSH with custom port/key. 
+ + Returns a list like: + ["-e", "ssh -o ConnectTimeout=10 -o BatchMode=yes ..."] + """ + ssh_cmd_parts = ["ssh"] + for opt in _ssh_o_options(server.connection_options): + ssh_cmd_parts.extend(["-o", opt]) + if server.port != 22: + ssh_cmd_parts.extend(["-p", str(server.port)]) + if server.key: + ssh_cmd_parts.extend(["-i", server.key]) + if proxy_chain: + proxy_cmd = _build_proxy_command(proxy_chain) + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + ssh_cmd_parts.extend(["-o", quoted]) + + return ["-e", " ".join(ssh_cmd_parts)] + + +def format_remote_path(server: SshEndpoint, path: str) -> str: + """Format a remote path as [user@]host:path.""" + host = f"{server.user}@{server.host}" if server.user else server.host + return f"{host}:{path}" diff --git a/nbkp/scriptgen.py b/nbkp/scriptgen.py new file mode 100644 index 0000000..e973046 --- /dev/null +++ b/nbkp/scriptgen.py @@ -0,0 +1,1180 @@ +"""Generate a standalone bash script from nbkp config. + +Compiles a Config into a self-contained shell script that performs +the same sync operations as ``nbkp run``, with all paths and +options baked in. The generated script accepts ``--dry-run`` +and ``--progress`` flags at runtime. 
+""" + +from __future__ import annotations + +import importlib.resources +import os +import shlex +from dataclasses import dataclass +from datetime import datetime, timezone +from textwrap import dedent + +from jinja2 import Environment, Template + +from .config import ( + Config, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SshEndpoint, + SyncConfig, +) +from .remote.ssh import build_ssh_base_args +from .sync.rsync import build_rsync_command + +# ── Public API ──────────────────────────────────────────────── + + +@dataclass(frozen=True) +class ScriptOptions: + """Options for script generation.""" + + config_path: str | None = None + output_file: str | None = None + relative_src: bool = False + relative_dst: bool = False + + +def generate_script( + config: Config, + options: ScriptOptions, + *, + now: datetime | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> str: + """Generate a standalone bash script from config.""" + re = resolved_endpoints or {} + if now is None: + now = datetime.now(timezone.utc) + vol_paths = _build_vol_paths(config, options) + ctx = _build_script_context(config, options, vol_paths, now, re) + template = _load_template() + return template.render(ctx) + "\n" + + +# ── Context dataclasses ────────────────────────────────────── + + +@dataclass(frozen=True) +class _SyncContext: + slug: str + fn_name: str + enabled: bool + has_btrfs: bool = False + has_hard_link: bool = False + has_prune: bool = False + max_snapshots: int | None = None + preflight: str = "" + link_dest: str = "" + rsync: str = "" + snapshot: str = "" + prune: str = "" + orphan_cleanup: str = "" + hl_mkdir: str = "" + symlink: str = "" + hl_prune: str = "" + disabled_body: str = "" + + +# ── Template loading ───────────────────────────────────────── + + +def _load_template() -> Template: + """Load the Jinja2 template with custom delimiters.""" + tpl_text = ( + importlib.resources.files("nbkp.templates") + .joinpath("backup.sh.j2") + 
.read_text(encoding="utf-8") + ) + env = Environment( + variable_start_string="${{", + variable_end_string="}}", + block_start_string="<%", + block_end_string="%>", + comment_start_string="<#", + comment_end_string="#>", + trim_blocks=True, + lstrip_blocks=True, + keep_trailing_newline=True, + ) + return env.from_string(tpl_text) + + +# ── Path helpers ───────────────────────────────────────────── + + +def _build_vol_paths( + config: Config, + options: ScriptOptions, +) -> dict[str, str]: + """Compute volume slug -> effective path.""" + src_slugs = {s.source.volume for s in config.syncs.values()} + dst_slugs = {s.destination.volume for s in config.syncs.values()} + + vol_paths: dict[str, str] = {} + for slug, vol in config.volumes.items(): + match vol: + case RemoteVolume(): + vol_paths[slug] = vol.path + case LocalVolume(): + should_relativize = ( + slug in src_slugs and options.relative_src + ) or (slug in dst_slugs and options.relative_dst) + if should_relativize and options.output_file: + output_dir = os.path.dirname(options.output_file) + rel = os.path.relpath(vol.path, output_dir) + vol_paths[slug] = f"${{NBKP_SCRIPT_DIR}}/{rel}" + else: + vol_paths[slug] = vol.path + return vol_paths + + +def _vol_path( + vol_paths: dict[str, str], + slug: str, + subdir: str | None = None, +) -> str: + base = vol_paths[slug] + if subdir: + return f"{base}/{subdir}" + return base + + +def _substitute_vol_path( + arg: str, + vol: LocalVolume | RemoteVolume, + vol_paths: dict[str, str], + slug: str, +) -> str: + """Replace absolute volume path prefix with vol_paths.""" + match vol: + case RemoteVolume(): + return arg + case LocalVolume(): + return arg.replace(vol.path, vol_paths[slug], 1) + + +# ── Shell formatting helpers ───────────────────────────────── + + +def _sq(s: str) -> str: + """Shell-quote (single quotes, no variable expansion).""" + return shlex.quote(s) + + +def _qp(s: str) -> str: + """Quote a path; double-quote if it contains $.""" + if "$" not in s: + return 
_sq(s) + return f'"{s}"' + + +def _slug_to_fn(slug: str) -> str: + return f"sync_{slug.replace('-', '_')}" + + +def _format_shell_command( + cmd: list[str], + cont_indent: str = " ", +) -> str: + """Format command list with backslash continuations.""" + parts = [_qp(arg) for arg in cmd] + if len(parts) <= 3: + return " ".join(parts) + sep = f" \\\n{cont_indent}" + return parts[0] + sep + sep.join(parts[1:]) + + +# ── SSH command helpers ────────────────────────────────────── + + +def _format_remote_test( + server: SshEndpoint, + proxy_chain: list[SshEndpoint], + test_args: list[str], +) -> str: + ssh_args = build_ssh_base_args(server, proxy_chain) + remote_cmd = "test " + " ".join(shlex.quote(a) for a in test_args) + return " ".join(_sq(a) for a in ssh_args) + " " + _sq(remote_cmd) + + +def _format_remote_check( + server: SshEndpoint, + proxy_chain: list[SshEndpoint], + cmd: list[str], +) -> str: + ssh_args = build_ssh_base_args(server, proxy_chain) + remote_cmd = " ".join(shlex.quote(a) for a in cmd) + return ( + " ".join(_sq(a) for a in ssh_args) + + " " + + _sq(remote_cmd) + + " >/dev/null 2>&1" + ) + + +def _format_remote_command_str( + server: SshEndpoint, + proxy_chain: list[SshEndpoint], + cmd: list[str], +) -> str: + ssh_args = build_ssh_base_args(server, proxy_chain) + remote_cmd = " ".join(shlex.quote(a) for a in cmd) + return " ".join(_sq(a) for a in ssh_args) + " " + _sq(remote_cmd) + + +# ── Local/remote dispatch helpers ──────────────────────────── + + +def _test_cmd( + vol: LocalVolume | RemoteVolume, + test_args: list[str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell expression for `test ... 
`.""" + match vol: + case LocalVolume(): + return "test " + " ".join(_qp(a) for a in test_args) + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + return _format_remote_test(ep.server, ep.proxy_chain, test_args) + + +def _which_cmd( + vol: LocalVolume | RemoteVolume, + command: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell expression to check command availability.""" + match vol: + case LocalVolume(): + return f"command -v {_sq(command)} >/dev/null 2>&1" + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + return _format_remote_check( + ep.server, ep.proxy_chain, ["which", command] + ) + + +def _ls_snapshots_cmd( + dst_vol: LocalVolume | RemoteVolume, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell expression to list snapshot dirs.""" + match dst_vol: + case LocalVolume(): + return f"ls {_qp(snaps_dir)}" + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + return _format_remote_command_str( + ep.server, ep.proxy_chain, ["ls", snaps_dir] + ) + + +def _snapshot_cmd( + dst_vol: LocalVolume | RemoteVolume, + latest: str, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command to create a btrfs snapshot.""" + snap_args = [ + "btrfs", + "subvolume", + "snapshot", + "-r", + latest, + f"{snaps_dir}/$NBKP_TS", + ] + match dst_vol: + case LocalVolume(): + return _format_shell_command(snap_args, cont_indent=" ") + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + remote_args = [ + "btrfs", + "subvolume", + "snapshot", + "-r", + latest, + f"{snaps_dir}/\\$NBKP_TS", + ] + return _format_remote_command_str( + ep.server, ep.proxy_chain, remote_args + ) + + +def _btrfs_prop_cmd( + dst_vol: LocalVolume | RemoteVolume, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command to set ro=false on $snap.""" + match dst_vol: + case LocalVolume(): + return ( + f"btrfs property set" f' {_qp(snaps_dir)}/"$snap"' f" ro false" + ) + 
case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + return _format_remote_command_str( + ep.server, + ep.proxy_chain, + [ + "btrfs", + "property", + "set", + f"{snaps_dir}/\\$snap", + "ro", + "false", + ], + ) + + +def _btrfs_del_cmd( + dst_vol: LocalVolume | RemoteVolume, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command to delete $snap.""" + match dst_vol: + case LocalVolume(): + return f"btrfs subvolume delete" f' {_qp(snaps_dir)}/"$snap"' + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + return _format_remote_command_str( + ep.server, + ep.proxy_chain, + [ + "btrfs", + "subvolume", + "delete", + f"{snaps_dir}/\\$snap", + ], + ) + + +# ── Hard-link command helpers ──────────────────────────────── + + +def _readlink_cmd( + dst_vol: LocalVolume | RemoteVolume, + path: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell expression that outputs the symlink target.""" + match dst_vol: + case LocalVolume(): + return f"readlink {_qp(path)}" + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + return _format_remote_command_str( + ep.server, ep.proxy_chain, ["readlink", path] + ) + + +def _rm_rf_snap_cmd( + dst_vol: LocalVolume | RemoteVolume, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command to rm -rf {snaps_dir}/$snap (loop variable).""" + match dst_vol: + case LocalVolume(): + return f'rm -rf {_qp(snaps_dir)}/"$snap"' + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + ssh_pfx = " ".join( + _sq(a) for a in build_ssh_base_args(ep.server, ep.proxy_chain) + ) + return f'{ssh_pfx} "rm -rf {snaps_dir}/$snap"' + + +def _mkdir_snap_cmd( + dst_vol: LocalVolume | RemoteVolume, + snaps_dir: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command to mkdir -p {snaps_dir}/$NBKP_TS.""" + match dst_vol: + case LocalVolume(): + return f'mkdir -p {_qp(snaps_dir)}/"$NBKP_TS"' + case RemoteVolume(): + ep = 
resolved_endpoints[dst_vol.slug] + ssh_pfx = " ".join( + _sq(a) for a in build_ssh_base_args(ep.server, ep.proxy_chain) + ) + return f'{ssh_pfx} "mkdir -p {snaps_dir}/$NBKP_TS"' + + +def _ln_sfn_cmd( + dst_vol: LocalVolume | RemoteVolume, + dest_path: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Shell command for ln -sfn snapshots/$NBKP_TS {dest}/latest.""" + match dst_vol: + case LocalVolume(): + return f'ln -sfn "snapshots/$NBKP_TS"' f" {_qp(dest_path)}/latest" + case RemoteVolume(): + ep = resolved_endpoints[dst_vol.slug] + ssh_pfx = " ".join( + _sq(a) for a in build_ssh_base_args(ep.server, ep.proxy_chain) + ) + return ( + f"{ssh_pfx}" + f' "ln -sfn snapshots/$NBKP_TS' + f' {dest_path}/latest"' + ) + + +# ── Block builders (textwrap.dedent) ───────────────────────── + + +def _build_check_line( + vol: LocalVolume | RemoteVolume, + test_args: list[str], + error_msg: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + cmd = _test_cmd(vol, test_args, resolved_endpoints) + return f"{cmd}" f' || {{ nbkp_log "ERROR: {error_msg}"; return 1; }}' + + +def _build_which_line( + vol: LocalVolume | RemoteVolume, + command: str, + error_msg: str, + resolved_endpoints: ResolvedEndpoints, +) -> str: + check = _which_cmd(vol, command, resolved_endpoints) + return f"{check}" f' || {{ nbkp_log "ERROR: {error_msg}"; return 1; }}' + + +def _build_preflight_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build preflight check lines at indent 0.""" + src_vol = config.volumes[sync.source.volume] + dst_vol = config.volumes[sync.destination.volume] + src_path = _vol_path(vol_paths, sync.source.volume, sync.source.subdir) + dst_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + + lines: list[str] = [] + + # Source endpoint sentinel + src_sentinel = f"{src_path}/.nbkp-src" + lines.append( + _build_check_line( + src_vol, + ["-f", src_sentinel], + 
f"source sentinel {src_sentinel} not found", + resolved_endpoints, + ) + ) + + # Source snapshot: verify latest/ and snapshots/ exist + if sync.source.snapshot_mode != "none": + src_latest = f"{src_path}/latest" + lines.append( + _build_check_line( + src_vol, + ["-d", src_latest], + ("source latest/ not found" f" ({src_latest})"), + resolved_endpoints, + ) + ) + src_snapshots = f"{src_path}/snapshots" + lines.append( + _build_check_line( + src_vol, + ["-d", src_snapshots], + ("source snapshots/ not found" f" ({src_snapshots})"), + resolved_endpoints, + ) + ) + + # Destination endpoint sentinel + dst_sentinel = f"{dst_path}/.nbkp-dst" + lines.append( + _build_check_line( + dst_vol, + ["-f", dst_sentinel], + f"destination sentinel {dst_sentinel} not found", + resolved_endpoints, + ) + ) + + # rsync on source + lines.append( + _build_which_line( + src_vol, + "rsync", + "rsync not found on source", + resolved_endpoints, + ) + ) + + # rsync on destination + lines.append( + _build_which_line( + dst_vol, + "rsync", + "rsync not found on destination", + resolved_endpoints, + ) + ) + + # Btrfs checks + if sync.destination.btrfs_snapshots.enabled: + lines.append( + _build_which_line( + dst_vol, + "btrfs", + "btrfs not found on destination", + resolved_endpoints, + ) + ) + latest_dir = f"{dst_path}/latest" + lines.append( + _build_check_line( + dst_vol, + ["-d", latest_dir], + "destination latest/ directory not found" f" ({latest_dir})", + resolved_endpoints, + ) + ) + snaps_dir = f"{dst_path}/snapshots" + lines.append( + _build_check_line( + dst_vol, + ["-d", snaps_dir], + "destination snapshots/ directory not found" f" ({snaps_dir})", + resolved_endpoints, + ) + ) + + # Hard-link checks + if sync.destination.hard_link_snapshots.enabled: + snaps_dir = f"{dst_path}/snapshots" + lines.append( + _build_check_line( + dst_vol, + ["-d", snaps_dir], + "destination snapshots/ directory not found" f" ({snaps_dir})", + resolved_endpoints, + ) + ) + + return "\n".join(lines) + + +def 
_build_link_dest_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, + *, + link_dest_prefix: str = "../", +) -> str: + """Build link-dest resolution block at indent 0. + + ``link_dest_prefix`` is the relative path from the rsync + destination to the snapshots directory (``../`` for hard-link + where rsync writes to ``snapshots/{ts}/``). + """ + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + snaps_dir = f"{dest_path}/snapshots" + ls_cmd = _ls_snapshots_cmd(dst_vol, snaps_dir, resolved_endpoints) + + return dedent(f"""\ + NBKP_LATEST_SNAP=$({ls_cmd} 2>/dev/null | sort | tail -1) + RSYNC_LINK_DEST="" + if [ -n "$NBKP_LATEST_SNAP" ]; then + RSYNC_LINK_DEST="--link-dest={link_dest_prefix}$NBKP_LATEST_SNAP" + fi""") + + +def _build_rsync_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, + *, + dest_suffix: str | None = None, + has_link_dest: bool = False, +) -> str: + """Build rsync command block at indent 0.""" + i2 = " " # continuation indent within this block + cmd = build_rsync_command( + sync, + config, + dry_run=False, + link_dest=None, + progress=None, + resolved_endpoints=resolved_endpoints, + dest_suffix=dest_suffix, + ) + + # Substitute local volume paths + src_vol = config.volumes[sync.source.volume] + dst_vol = config.volumes[sync.destination.volume] + match (src_vol, dst_vol): + case (RemoteVolume(), RemoteVolume()): + pass + case _: + cmd[-2] = _substitute_vol_path( + cmd[-2], + src_vol, + vol_paths, + sync.source.volume, + ) + cmd[-1] = _substitute_vol_path( + cmd[-1], + dst_vol, + vol_paths, + sync.destination.volume, + ) + + formatted = _format_shell_command(cmd, cont_indent=i2) + + runtime_vars = [ + '${RSYNC_DRY_RUN_FLAG:+"$RSYNC_DRY_RUN_FLAG"}', + "$RSYNC_PROGRESS_FLAGS", + ] + if has_link_dest: + runtime_vars.insert(0, 
'${RSYNC_LINK_DEST:+"$RSYNC_LINK_DEST"}') + runtime_suffix = f" \\\n{i2}".join(runtime_vars) + return f"{formatted} \\\n{i2}{runtime_suffix}" + + +def _build_snapshot_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build btrfs snapshot block at indent 0.""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + latest = f"{dest_path}/latest" + snaps_dir = f"{dest_path}/snapshots" + snap = _snapshot_cmd(dst_vol, latest, snaps_dir, resolved_endpoints) + + return dedent(f"""\ + if [ "$NBKP_DRY_RUN" = false ]; then + NBKP_TS=$(date -u +%Y-%m-%dT%H:%M:%S.000Z) + {snap} + fi""") + + +def _build_prune_block( + sync: SyncConfig, + config: Config, + max_snapshots: int, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build btrfs prune block at indent 0.""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + snaps_dir = f"{dest_path}/snapshots" + ls_cmd = _ls_snapshots_cmd(dst_vol, snaps_dir, resolved_endpoints) + prop_cmd = _btrfs_prop_cmd(dst_vol, snaps_dir, resolved_endpoints) + del_cmd = _btrfs_del_cmd(dst_vol, snaps_dir, resolved_endpoints) + + # fmt: off + pipe_while = ( + 'echo "$NBKP_SNAPS"' + ' | head -n "$NBKP_EXCESS"' + " | while IFS= read -r snap; do" + ) + # fmt: on + return dedent(f"""\ + if [ "$NBKP_DRY_RUN" = false ]; then + NBKP_SNAPS=$({ls_cmd} | sort) + NBKP_COUNT=$(echo "$NBKP_SNAPS" | wc -l | tr -d ' ') + NBKP_EXCESS=$((NBKP_COUNT - {max_snapshots})) + if [ "$NBKP_EXCESS" -gt 0 ]; then + {pipe_while} + nbkp_log "Pruning snapshot: $snap" + {prop_cmd} + {del_cmd} + done + fi + fi""") + + +def _build_hard_link_orphan_cleanup_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + 
"""Build orphan cleanup block for hard-link snapshots.""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + latest_path = f"{dest_path}/latest" + snaps_dir = f"{dest_path}/snapshots" + rl_cmd = _readlink_cmd(dst_vol, latest_path, resolved_endpoints) + ls_cmd = _ls_snapshots_cmd(dst_vol, snaps_dir, resolved_endpoints) + rm_cmd = _rm_rf_snap_cmd(dst_vol, snaps_dir, resolved_endpoints) + + return dedent(f"""\ + NBKP_LATEST_LINK=$({rl_cmd} 2>/dev/null || true) + if [ -n "$NBKP_LATEST_LINK" ]; then + NBKP_LATEST_NAME="${{NBKP_LATEST_LINK##*/}}" + for snap in $({ls_cmd} 2>/dev/null | sort); do + if [ "$snap" \\> "$NBKP_LATEST_NAME" ]; then + nbkp_log "Removing orphaned snapshot: $snap" + {rm_cmd} + fi + done + fi""") + + +def _build_hard_link_mkdir_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build snapshot directory creation block.""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + snaps_dir = f"{dest_path}/snapshots" + mkdir_cmd = _mkdir_snap_cmd(dst_vol, snaps_dir, resolved_endpoints) + + return dedent(f"""\ + NBKP_TS=$(date -u +%Y-%m-%dT%H:%M:%S.000Z) + {mkdir_cmd}""") + + +def _build_hard_link_symlink_block( + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build latest symlink update block.""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + ln_cmd = _ln_sfn_cmd(dst_vol, dest_path, resolved_endpoints) + + return dedent(f"""\ + if [ "$NBKP_DRY_RUN" = false ]; then + {ln_cmd} + fi""") + + +def _build_hard_link_prune_block( + sync: SyncConfig, + config: Config, + max_snapshots: int, + vol_paths: dict[str, str], + 
resolved_endpoints: ResolvedEndpoints, +) -> str: + """Build hard-link prune block (rm -rf, skip latest).""" + dst_vol = config.volumes[sync.destination.volume] + dest_path = _vol_path( + vol_paths, + sync.destination.volume, + sync.destination.subdir, + ) + latest_path = f"{dest_path}/latest" + snaps_dir = f"{dest_path}/snapshots" + ls_cmd = _ls_snapshots_cmd(dst_vol, snaps_dir, resolved_endpoints) + rl_cmd = _readlink_cmd(dst_vol, latest_path, resolved_endpoints) + rm_cmd = _rm_rf_snap_cmd(dst_vol, snaps_dir, resolved_endpoints) + + # fmt: off + pipe_while = ( + 'echo "$NBKP_SNAPS"' + ' | head -n "$NBKP_EXCESS"' + " | while IFS= read -r snap; do" + ) + # fmt: on + return dedent(f"""\ + if [ "$NBKP_DRY_RUN" = false ]; then + NBKP_SNAPS=$({ls_cmd} | sort) + NBKP_COUNT=$(echo "$NBKP_SNAPS" | wc -l | tr -d ' ') + NBKP_EXCESS=$((NBKP_COUNT - {max_snapshots})) + NBKP_LATEST_LINK=$({rl_cmd} 2>/dev/null || true) + NBKP_LATEST_NAME="${{NBKP_LATEST_LINK##*/}}" + if [ "$NBKP_EXCESS" -gt 0 ]; then + {pipe_while} + if [ "$snap" != "$NBKP_LATEST_NAME" ]; then + nbkp_log "Pruning snapshot: $snap" + {rm_cmd} + fi + done + fi + fi""") + + +# ── Volume check builder ──────────────────────────────────── + + +def _build_volume_check( + slug: str, + vol: LocalVolume | RemoteVolume, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> str: + vpath = vol_paths[slug] + sentinel = f"{vpath}/.nbkp-vol" + match vol: + case LocalVolume(): + test_cmd = f"test -f {_qp(sentinel)}" + case RemoteVolume(): + ep = resolved_endpoints[vol.slug] + test_cmd = _format_remote_test( + ep.server, ep.proxy_chain, ["-f", sentinel] + ) + return ( + f"{test_cmd}" + f" || {{ nbkp_log" + f' "WARN: volume {slug}:' + f' sentinel {sentinel} not found";' + f" }}" + ) + + +# ── Disabled sync body ─────────────────────────────────────── + + +def _build_disabled_body( + slug: str, + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> 
str: + """Build the commented-out function body for a disabled sync.""" + enabled_sync = SyncConfig( + slug=sync.slug, + source=sync.source, + destination=sync.destination, + enabled=True, + rsync_options=sync.rsync_options, + filters=sync.filters, + filter_file=sync.filter_file, + ) + ctx = _build_sync_context( + slug, + enabled_sync, + config, + vol_paths, + resolved_endpoints, + ) + + # Render the function body the same way the template would + lines = _render_enabled_function(ctx) + return "\n".join( + f"# {line}" if line.strip() else "#" for line in lines.split("\n") + ) + + +def _render_enabled_function(ctx: _SyncContext) -> str: + """Render a sync function body (for disabled commenting).""" + parts: list[str] = [] + parts.append("") + parts.append(f"{ctx.fn_name}() {{") + parts.append(f' nbkp_log "Starting sync: {ctx.slug}"') + parts.append("") + parts.append(" # Pre-flight checks") + for line in ctx.preflight.split("\n"): + parts.append(f" {line}" if line else "") + if ctx.has_hard_link: + parts.append("") + parts.append(" # Cleanup orphaned snapshots") + for line in ctx.orphan_cleanup.split("\n"): + parts.append(f" {line}" if line else "") + parts.append("") + parts.append( + " # Link-dest resolution" + " (latest snapshot for incremental backup)" + ) + for line in ctx.link_dest.split("\n"): + parts.append(f" {line}" if line else "") + parts.append("") + parts.append(" # Create snapshot directory") + for line in ctx.hl_mkdir.split("\n"): + parts.append(f" {line}" if line else "") + if ctx.has_btrfs: + parts.append("") + parts.append( + " # Link-dest resolution" + " (latest snapshot for incremental backup)" + ) + for line in ctx.link_dest.split("\n"): + parts.append(f" {line}" if line else "") + parts.append("") + parts.append(" # Rsync") + for line in ctx.rsync.split("\n"): + parts.append(f" {line}" if line else "") + if ctx.has_btrfs: + parts.append("") + parts.append(" # Btrfs snapshot (skip if dry-run)") + for line in ctx.snapshot.split("\n"): + 
parts.append(f" {line}" if line else "") + if ctx.has_prune: + parts.append("") + parts.append( + f" # Prune old snapshots" f" (max: {ctx.max_snapshots})" + ) + for line in ctx.prune.split("\n"): + parts.append(f" {line}" if line else "") + if ctx.has_hard_link: + parts.append("") + parts.append(" # Update latest symlink (skip if dry-run)") + for line in ctx.symlink.split("\n"): + parts.append(f" {line}" if line else "") + if ctx.has_prune: + parts.append("") + parts.append( + f" # Prune old snapshots" f" (max: {ctx.max_snapshots})" + ) + for line in ctx.hl_prune.split("\n"): + parts.append(f" {line}" if line else "") + parts.append("") + parts.append(f' nbkp_log "Completed sync: {ctx.slug}"') + parts.append("}") + return "\n".join(parts) + + +# ── Context builders ───────────────────────────────────────── + + +def _build_sync_context( + slug: str, + sync: SyncConfig, + config: Config, + vol_paths: dict[str, str], + resolved_endpoints: ResolvedEndpoints, +) -> _SyncContext: + """Build a _SyncContext with all pre-computed blocks.""" + has_btrfs = sync.destination.btrfs_snapshots.enabled + has_hard_link = sync.destination.hard_link_snapshots.enabled + btrfs_cfg = sync.destination.btrfs_snapshots + hl_cfg = sync.destination.hard_link_snapshots + + has_prune = (has_btrfs and btrfs_cfg.max_snapshots is not None) or ( + has_hard_link and hl_cfg.max_snapshots is not None + ) + max_snaps = ( + btrfs_cfg.max_snapshots + if has_btrfs + else hl_cfg.max_snapshots if has_hard_link else None + ) + + preflight = _build_preflight_block( + sync, config, vol_paths, resolved_endpoints + ) + + # Link-dest: only for hard-link (removed from btrfs) + link_dest = ( + _build_link_dest_block( + sync, + config, + vol_paths, + resolved_endpoints, + link_dest_prefix="../", + ) + if has_hard_link + else "" + ) + + # Rsync block + if has_hard_link: + rsync = _build_rsync_block( + sync, + config, + vol_paths, + resolved_endpoints, + dest_suffix="snapshots/$NBKP_TS", + has_link_dest=True, + ) + 
elif has_btrfs: + rsync = _build_rsync_block( + sync, + config, + vol_paths, + resolved_endpoints, + dest_suffix="latest", + ) + else: + rsync = _build_rsync_block( + sync, + config, + vol_paths, + resolved_endpoints, + dest_suffix=None, + ) + + # Btrfs blocks + snapshot = ( + _build_snapshot_block(sync, config, vol_paths, resolved_endpoints) + if has_btrfs + else "" + ) + prune = ( + _build_prune_block( + sync, + config, + max_snaps, + vol_paths, + resolved_endpoints, + ) + if has_btrfs and max_snaps is not None + else "" + ) + + # Hard-link blocks + orphan_cleanup = ( + _build_hard_link_orphan_cleanup_block( + sync, config, vol_paths, resolved_endpoints + ) + if has_hard_link + else "" + ) + hl_mkdir = ( + _build_hard_link_mkdir_block( + sync, config, vol_paths, resolved_endpoints + ) + if has_hard_link + else "" + ) + symlink = ( + _build_hard_link_symlink_block( + sync, config, vol_paths, resolved_endpoints + ) + if has_hard_link + else "" + ) + hl_prune = ( + _build_hard_link_prune_block( + sync, + config, + max_snaps, + vol_paths, + resolved_endpoints, + ) + if has_hard_link and max_snaps is not None + else "" + ) + + return _SyncContext( + slug=slug, + fn_name=_slug_to_fn(slug), + enabled=sync.enabled, + has_btrfs=has_btrfs, + has_hard_link=has_hard_link, + has_prune=has_prune, + max_snapshots=max_snaps, + preflight=preflight, + link_dest=link_dest, + rsync=rsync, + snapshot=snapshot, + prune=prune, + orphan_cleanup=orphan_cleanup, + hl_mkdir=hl_mkdir, + symlink=symlink, + hl_prune=hl_prune, + ) + + +def _build_script_context( + config: Config, + options: ScriptOptions, + vol_paths: dict[str, str], + now: datetime, + resolved_endpoints: ResolvedEndpoints, +) -> dict[str, object]: + """Build the full template context dict.""" + timestamp = now.isoformat(timespec="seconds").replace("+00:00", "Z") + config_line = ( + f"# Config: {options.config_path}" + if options.config_path + else "# Config: " + ) + has_script_dir = any("$" in p for p in vol_paths.values()) + 
+ volume_checks = [ + _build_volume_check(slug, vol, vol_paths, resolved_endpoints) + for slug, vol in config.volumes.items() + ] + + from .sync.ordering import sort_syncs + + syncs: list[_SyncContext] = [] + for slug in sort_syncs(config.syncs): + sync = config.syncs[slug] + ctx = _build_sync_context( + slug, sync, config, vol_paths, resolved_endpoints + ) + if sync.enabled: + syncs.append(ctx) + else: + disabled_body = _build_disabled_body( + slug, + sync, + config, + vol_paths, + resolved_endpoints, + ) + syncs.append( + _SyncContext( + slug=ctx.slug, + fn_name=ctx.fn_name, + enabled=False, + disabled_body=disabled_body, + ) + ) + + return { + "timestamp": timestamp, + "config_line": config_line, + "has_script_dir": has_script_dir, + "volume_checks": volume_checks, + "syncs": syncs, + } diff --git a/nbkp/sync/__init__.py b/nbkp/sync/__init__.py new file mode 100644 index 0000000..e0899d7 --- /dev/null +++ b/nbkp/sync/__init__.py @@ -0,0 +1,34 @@ +"""Sync orchestration and rsync command building.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .rsync import ProgressMode as ProgressMode + +if TYPE_CHECKING: + from .runner import PruneResult as PruneResult + from .runner import SyncResult as SyncResult + from .runner import run_all_syncs as run_all_syncs + +__all__ = [ + "ProgressMode", + "PruneResult", + "SyncResult", + "run_all_syncs", +] + + +def __getattr__(name: str) -> object: + if name in __all__: + from . 
import runner + + globals().update( + { + "PruneResult": runner.PruneResult, + "SyncResult": runner.SyncResult, + "run_all_syncs": runner.run_all_syncs, + } + ) + return globals()[name] + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/nbkp/sync/btrfs.py b/nbkp/sync/btrfs.py new file mode 100644 index 0000000..60f0d0d --- /dev/null +++ b/nbkp/sync/btrfs.py @@ -0,0 +1,196 @@ +"""Btrfs snapshot creation, lookup, and pruning.""" + +from __future__ import annotations + +import subprocess +from datetime import datetime, timezone + +from ..config import ( + Config, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SyncConfig, + Volume, +) +from ..remote import run_remote_command + + +def resolve_dest_path(sync: SyncConfig, config: Config) -> str: + """Resolve the destination path for a sync.""" + vol = config.volumes[sync.destination.volume] + if sync.destination.subdir: + return f"{vol.path}/{sync.destination.subdir}" + else: + return vol.path + + +def create_snapshot( + sync: SyncConfig, + config: Config, + *, + now: datetime | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> str: + """Create a read-only btrfs snapshot of latest/ into snapshots/. + + Returns the snapshot path. + """ + re = resolved_endpoints or {} + if now is None: + now = datetime.now(timezone.utc) + dest_path = resolve_dest_path(sync, config) + # isoformat uses +00:00, but Z is more conventional for UTC. 
+ timestamp = now.isoformat(timespec="milliseconds").replace("+00:00", "Z") + snapshot_path = f"{dest_path}/snapshots/{timestamp}" + latest_path = f"{dest_path}/latest" + + cmd = [ + "btrfs", + "subvolume", + "snapshot", + "-r", + latest_path, + snapshot_path, + ] + + dst_vol = config.volumes[sync.destination.volume] + match dst_vol: + case RemoteVolume(): + ep = re[dst_vol.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise RuntimeError(f"btrfs snapshot failed: {result.stderr}") + else: + return snapshot_path + + +def list_snapshots( + sync: SyncConfig, + config: Config, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> list[str]: + """List all snapshot paths sorted oldest-first.""" + re = resolved_endpoints or {} + dest_path = resolve_dest_path(sync, config) + snapshots_dir = f"{dest_path}/snapshots" + + dst_vol = config.volumes[sync.destination.volume] + match dst_vol: + case RemoteVolume(): + ep = re[dst_vol.slug] + result = run_remote_command( + ep.server, ["ls", snapshots_dir], ep.proxy_chain + ) + case LocalVolume(): + result = subprocess.run( + ["ls", snapshots_dir], + capture_output=True, + text=True, + ) + + if result.returncode != 0 or not result.stdout.strip(): + return [] + else: + entries = sorted(result.stdout.strip().split("\n")) + return [f"{snapshots_dir}/{e}" for e in entries] + + +def get_latest_snapshot( + sync: SyncConfig, + config: Config, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> str | None: + """Get the path to the most recent snapshot, or None.""" + snapshots = list_snapshots(sync, config, resolved_endpoints) + if snapshots: + return snapshots[-1] + else: + return None + + +def _make_snapshot_writable( + path: str, + volume: Volume, + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Unset the readonly property so the snapshot can be deleted.""" + cmd = 
["btrfs", "property", "set", path, "ro", "false"] + match volume: + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise RuntimeError( + f"btrfs property set ro=false failed: {result.stderr}" + ) + + +def delete_snapshot( + path: str, + volume: Volume, + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Delete a single btrfs snapshot subvolume. + + First unsets the readonly property (needed when the filesystem + is mounted with user_subvol_rm_allowed instead of granting + CAP_SYS_ADMIN), then deletes the subvolume. + """ + _make_snapshot_writable(path, volume, resolved_endpoints) + + cmd = ["btrfs", "subvolume", "delete", path] + match volume: + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command(ep.server, cmd, ep.proxy_chain) + case LocalVolume(): + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise RuntimeError(f"btrfs delete failed: {result.stderr}") + + +def prune_snapshots( + sync: SyncConfig, + config: Config, + max_snapshots: int, + *, + dry_run: bool = False, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> list[str]: + """Delete oldest snapshots exceeding max_snapshots. + + Returns list of deleted (or would-be-deleted) paths. 
+ """ + re = resolved_endpoints or {} + snapshots = list_snapshots(sync, config, re) + excess = len(snapshots) - max_snapshots + if excess <= 0: + return [] + + to_delete = snapshots[:excess] + if not dry_run: + dst_vol = config.volumes[sync.destination.volume] + for path in to_delete: + delete_snapshot(path, dst_vol, re) + + return to_delete diff --git a/nbkp/sync/hardlinks.py b/nbkp/sync/hardlinks.py new file mode 100644 index 0000000..d8305a1 --- /dev/null +++ b/nbkp/sync/hardlinks.py @@ -0,0 +1,211 @@ +"""Hard-link snapshot creation, lookup, symlink management, and pruning.""" + +from __future__ import annotations + +import shutil +import subprocess +from datetime import datetime, timezone +from pathlib import Path + +from ..config import ( + Config, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SyncConfig, + Volume, +) +from ..remote import run_remote_command +from .btrfs import list_snapshots, resolve_dest_path + + +def create_snapshot_dir( + sync: SyncConfig, + config: Config, + *, + now: datetime | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> str: + """Create a snapshot directory for the current sync. + + Returns the full snapshot path. 
+ """ + re = resolved_endpoints or {} + if now is None: + now = datetime.now(timezone.utc) + dest_path = resolve_dest_path(sync, config) + timestamp = now.isoformat(timespec="milliseconds").replace("+00:00", "Z") + snapshot_path = f"{dest_path}/snapshots/{timestamp}" + + dst_vol = config.volumes[sync.destination.volume] + match dst_vol: + case RemoteVolume(): + ep = re[dst_vol.slug] + result = run_remote_command( + ep.server, ["mkdir", "-p", snapshot_path], ep.proxy_chain + ) + case LocalVolume(): + result = subprocess.run( + ["mkdir", "-p", snapshot_path], + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise RuntimeError(f"mkdir snapshot dir failed: {result.stderr}") + return snapshot_path + + +def read_latest_symlink( + sync: SyncConfig, + config: Config, + *, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> str | None: + """Read the latest symlink target, returning the snapshot name. + + Returns None if the symlink does not exist. + """ + re = resolved_endpoints or {} + dest_path = resolve_dest_path(sync, config) + latest_path = f"{dest_path}/latest" + + dst_vol = config.volumes[sync.destination.volume] + match dst_vol: + case LocalVolume(): + p = Path(latest_path) + if not p.is_symlink(): + return None + target = str(p.readlink()) + case RemoteVolume(): + ep = re[dst_vol.slug] + result = run_remote_command( + ep.server, + ["readlink", latest_path], + ep.proxy_chain, + ) + if result.returncode != 0: + return None + target = result.stdout.strip() + + # Target is like "snapshots/{name}" — extract the name + if "/" in target: + return target.rsplit("/", 1)[-1] + else: + return target + + +def update_latest_symlink( + sync: SyncConfig, + config: Config, + snapshot_name: str, + *, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> None: + """Create or update the latest symlink to point to a snapshot.""" + re = resolved_endpoints or {} + dest_path = resolve_dest_path(sync, config) + latest_path = f"{dest_path}/latest" + 
target = f"snapshots/{snapshot_name}" + + dst_vol = config.volumes[sync.destination.volume] + match dst_vol: + case LocalVolume(): + p = Path(latest_path) + p.unlink(missing_ok=True) + p.symlink_to(target) + case RemoteVolume(): + ep = re[dst_vol.slug] + result = run_remote_command( + ep.server, + ["ln", "-sfn", target, latest_path], + ep.proxy_chain, + ) + if result.returncode != 0: + raise RuntimeError(f"symlink update failed: {result.stderr}") + + +def cleanup_orphaned_snapshots( + sync: SyncConfig, + config: Config, + *, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> list[str]: + """Remove snapshots newer than the latest symlink target. + + These are leftover directories from failed syncs. + Returns list of deleted paths. + """ + re = resolved_endpoints or {} + latest_name = read_latest_symlink(sync, config, resolved_endpoints=re) + if latest_name is None: + return [] + + all_snapshots = list_snapshots(sync, config, re) + dst_vol = config.volumes[sync.destination.volume] + deleted: list[str] = [] + + for snap_path in all_snapshots: + snap_name = snap_path.rsplit("/", 1)[-1] + if snap_name > latest_name: + delete_snapshot(snap_path, dst_vol, re) + deleted.append(snap_path) + + return deleted + + +def delete_snapshot( + path: str, + volume: Volume, + resolved_endpoints: ResolvedEndpoints, +) -> None: + """Delete a hard-link snapshot directory.""" + match volume: + case RemoteVolume(): + ep = resolved_endpoints[volume.slug] + result = run_remote_command( + ep.server, ["rm", "-rf", path], ep.proxy_chain + ) + if result.returncode != 0: + raise RuntimeError(f"rm -rf snapshot failed: {result.stderr}") + case LocalVolume(): + shutil.rmtree(path) + + +def prune_snapshots( + sync: SyncConfig, + config: Config, + max_snapshots: int, + *, + dry_run: bool = False, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> list[str]: + """Delete oldest snapshots exceeding max_snapshots. + + Never prunes the snapshot that the latest symlink points to. 
+ Returns list of deleted (or would-be-deleted) paths. + """ + re = resolved_endpoints or {} + snapshots = list_snapshots(sync, config, re) + excess = len(snapshots) - max_snapshots + if excess <= 0: + return [] + + latest_name = read_latest_symlink(sync, config, resolved_endpoints=re) + + # Candidates are oldest first, but skip the latest target + to_delete: list[str] = [] + for snap_path in snapshots: + if len(to_delete) >= excess: + break + snap_name = snap_path.rsplit("/", 1)[-1] + if snap_name == latest_name: + continue + to_delete.append(snap_path) + + if not dry_run: + dst_vol = config.volumes[sync.destination.volume] + for path in to_delete: + delete_snapshot(path, dst_vol, re) + + return to_delete diff --git a/nbkp/sync/ordering.py b/nbkp/sync/ordering.py new file mode 100644 index 0000000..613decb --- /dev/null +++ b/nbkp/sync/ordering.py @@ -0,0 +1,52 @@ +"""Sync dependency graph and topological ordering.""" + +from __future__ import annotations + +from collections import defaultdict +from graphlib import CycleError, TopologicalSorter + +from ..config import ConfigError +from ..config.protocol import SyncConfig, SyncEndpoint + +EndpointKey = tuple[str, str | None] + + +def endpoint_key(endpoint: SyncEndpoint) -> EndpointKey: + """Return a hashable key for a sync endpoint.""" + return (endpoint.volume, endpoint.subdir) + + +def sort_syncs(syncs: dict[str, SyncConfig]) -> list[str]: + """Topologically sort syncs by their endpoint dependencies. + + A sync B depends on sync A when A's destination matches + B's source (same volume and subdir). Returns sync slugs + in an order where dependees come before dependents. + + Raises ``ConfigError`` when a dependency cycle is detected. 
+ """ + # Map each destination endpoint to the syncs that write to it + writers: dict[EndpointKey, list[str]] = defaultdict(list) + for sync_slug, sync in syncs.items(): + dst_key = endpoint_key(sync.destination) + writers[dst_key].append(sync_slug) + + # Build the dependency graph: node → set of predecessors + graph: dict[str, set[str]] = {} + for sync_slug, sync in syncs.items(): + src_key = endpoint_key(sync.source) + deps = { + writer + for writer in writers.get(src_key, []) + if writer != sync_slug + } + graph[sync_slug] = deps + + ts = TopologicalSorter(graph) + try: + return list(ts.static_order()) + except CycleError as exc: + cycle = exc.args[1] + raise ConfigError( + "Cyclic sync dependency detected: " + " -> ".join(cycle) + ) from exc diff --git a/nbkp/sync/rsync.py b/nbkp/sync/rsync.py new file mode 100644 index 0000000..2a394f8 --- /dev/null +++ b/nbkp/sync/rsync.py @@ -0,0 +1,303 @@ +"""Rsync command building and execution.""" + +from __future__ import annotations + +import shlex +import subprocess +from enum import Enum +from typing import Callable + +from ..config import ( + Config, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from ..remote import ( + build_ssh_base_args, + build_ssh_e_option, + format_remote_path, +) + + +class ProgressMode(str, Enum): + """Rsync progress reporting mode.""" + + NONE = "none" + OVERALL = "overall" + PER_FILE = "per-file" + FULL = "full" + + +_DEFAULT_RSYNC_OPTIONS: list[str] = [ + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", # hide sentinels from transfer + "--filter=P .nbkp-*", # protect sentinels from deletion +] + + +def resolve_path( + volume: LocalVolume | RemoteVolume, subdir: str | None +) -> str: + """Resolve the full path for a volume with optional subdir.""" + if subdir: + return f"{volume.path}/{subdir}" + else: + return volume.path + + +def resolve_source_path( + volume: 
LocalVolume | RemoteVolume, + source: SyncEndpoint, +) -> str: + """Resolve source path, appending /latest for snapshots. + + When the source endpoint has snapshots configured (btrfs or + hard-link), rsync should read from the ``latest/`` directory + rather than the volume root. For hard-link snapshots, + ``latest`` is a symlink — rsync's trailing slash causes it + to follow the symlink and copy the target's contents. + """ + base = resolve_path(volume, source.subdir) + if source.snapshot_mode != "none": + return f"{base}/latest" + return base + + +def _base_rsync_args( + sync: SyncConfig, + dry_run: bool, + link_dest: str | None, + progress: ProgressMode | None = None, +) -> list[str]: + """Build common rsync flags.""" + rsync_opts = sync.rsync_options + options = ( + rsync_opts.default_options_override + if rsync_opts.default_options_override is not None + else _DEFAULT_RSYNC_OPTIONS + ) + args = ["rsync"] + list(options) + if rsync_opts.checksum: + args.append("--checksum") + if rsync_opts.compress: + args.append("--compress") + args.extend(rsync_opts.extra_options) + match progress: + case ProgressMode.OVERALL: + args.extend( + [ + "--info=progress2", + "--stats", + "--human-readable", + ] + ) + case ProgressMode.PER_FILE: + args.extend( + [ + "-v", + "--progress", + "--human-readable", + ] + ) + case ProgressMode.FULL: + args.extend( + [ + "-v", + "--progress", + "--info=progress2", + "--stats", + "--human-readable", + ] + ) + case ProgressMode.NONE | None: + pass + if dry_run: + args.append("--dry-run") + if link_dest: + args.append(f"--link-dest={link_dest}") + return args + + +def _filter_args(sync: SyncConfig) -> list[str]: + """Build rsync --filter arguments.""" + args: list[str] = [] + for rule in sync.filters: + args.append(f"--filter={rule}") + if sync.filter_file: + args.append(f"--filter=merge {sync.filter_file}") + return args + + +def build_rsync_command( + sync: SyncConfig, + config: Config, + dry_run: bool = False, + link_dest: str | None = None, 
+ progress: ProgressMode | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, + dest_suffix: str | None = None, +) -> list[str]: + """Build the rsync command for a sync operation. + + Returns the full command as a list of args, potentially + wrapped in SSH for remote-to-remote syncs. + """ + re = resolved_endpoints or {} + src_vol = config.volumes[sync.source.volume] + dst_vol = config.volumes[sync.destination.volume] + + src_path = resolve_source_path(src_vol, sync.source) + dst_path = resolve_path(dst_vol, sync.destination.subdir) + + match (src_vol, dst_vol): + case (RemoteVolume() as sv, RemoteVolume() as dv): + dst_ep = re[dv.slug] + return _build_remote_same_server( + sync, + dst_ep.server, + src_path, + dst_path, + dry_run, + link_dest, + progress, + proxy_chain=dst_ep.proxy_chain, + dest_suffix=dest_suffix, + ) + case (RemoteVolume() as sv, LocalVolume()): + src_ep = re[sv.slug] + rsync_args = _base_rsync_args(sync, dry_run, link_dest, progress) + rsync_args.extend(_filter_args(sync)) + rsync_args.extend( + build_ssh_e_option( + src_ep.server, + src_ep.proxy_chain, + ) + ) + rsync_args.append( + format_remote_path(src_ep.server, src_path) + "/" + ) + dst_target = ( + f"{dst_path}/{dest_suffix}/" if dest_suffix else f"{dst_path}/" + ) + rsync_args.append(dst_target) + return rsync_args + case (LocalVolume(), RemoteVolume() as dv): + dst_ep = re[dv.slug] + rsync_args = _base_rsync_args(sync, dry_run, link_dest, progress) + rsync_args.extend(_filter_args(sync)) + rsync_args.extend( + build_ssh_e_option( + dst_ep.server, + dst_ep.proxy_chain, + ) + ) + rsync_args.append(f"{src_path}/") + dst_remote = format_remote_path(dst_ep.server, dst_path) + dst_target = ( + f"{dst_remote}/{dest_suffix}/" + if dest_suffix + else f"{dst_remote}/" + ) + rsync_args.append(dst_target) + return rsync_args + case _: + rsync_args = _base_rsync_args(sync, dry_run, link_dest, progress) + rsync_args.extend(_filter_args(sync)) + rsync_args.append(f"{src_path}/") + 
dst_target = ( + f"{dst_path}/{dest_suffix}/" if dest_suffix else f"{dst_path}/" + ) + rsync_args.append(dst_target) + return rsync_args + + +def _build_remote_same_server( + sync: SyncConfig, + server: SshEndpoint, + src_path: str, + dst_path: str, + dry_run: bool, + link_dest: str | None, + progress: ProgressMode | None = None, + proxy_chain: list[SshEndpoint] | None = None, + dest_suffix: str | None = None, +) -> list[str]: + """Build rsync command when both volumes are on the same server. + + SSH into the server once and run rsync with local paths. + """ + rsync_args = _base_rsync_args(sync, dry_run, link_dest, progress) + rsync_args.extend(_filter_args(sync)) + rsync_args.append(f"{src_path}/") + dst_target = ( + f"{dst_path}/{dest_suffix}/" if dest_suffix else f"{dst_path}/" + ) + rsync_args.append(dst_target) + + inner_command = shlex.join(rsync_args) + return build_ssh_base_args(server, proxy_chain) + [inner_command] + + +def run_rsync( + sync: SyncConfig, + config: Config, + dry_run: bool = False, + link_dest: str | None = None, + progress: ProgressMode | None = None, + on_output: Callable[[str], None] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, + dest_suffix: str | None = None, +) -> subprocess.CompletedProcess[str]: + """Build and execute the rsync command for a sync.""" + cmd = build_rsync_command( + sync, + config, + dry_run, + link_dest, + progress, + resolved_endpoints, + dest_suffix=dest_suffix, + ) + if on_output is None: + return subprocess.run( + cmd, + capture_output=True, + text=True, + ) + else: + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + ) + + assert proc.stdout is not None + output_chunks: list[str] = [] + + # Stream one character at a time so rsync progress + # updates that rely on carriage returns are visible + # immediately. 
+ while True: + ch = proc.stdout.read(1) + if ch: + output_chunks.append(ch) + on_output(ch) + elif proc.poll() is not None: + break + + return subprocess.CompletedProcess( + cmd, + proc.wait(), + stdout="".join(output_chunks), + stderr="", + ) diff --git a/nbkp/sync/runner.py b/nbkp/sync/runner.py new file mode 100644 index 0000000..c472192 --- /dev/null +++ b/nbkp/sync/runner.py @@ -0,0 +1,446 @@ +"""Sync orchestration: checks -> rsync -> snapshots.""" + +from __future__ import annotations + +import shutil +from typing import Callable, Optional + +from pydantic import BaseModel + +from .btrfs import ( + create_snapshot, + prune_snapshots as btrfs_prune_snapshots, +) +from .hardlinks import ( + cleanup_orphaned_snapshots, + create_snapshot_dir, + prune_snapshots as hl_prune_snapshots, + update_latest_symlink, +) +from .btrfs import get_latest_snapshot +from ..config import Config, ResolvedEndpoints +from ..check import SyncStatus +from .rsync import ProgressMode, run_rsync + + +class SyncResult(BaseModel): + """Result of running a sync.""" + + sync_slug: str + success: bool + dry_run: bool + rsync_exit_code: int + output: str + snapshot_path: Optional[str] = None + pruned_paths: Optional[list[str]] = None + error: Optional[str] = None + + +class PruneResult(BaseModel): + """Result of pruning snapshots for a sync.""" + + sync_slug: str + deleted: list[str] + kept: int + dry_run: bool + error: Optional[str] = None + + +def run_all_syncs( + config: Config, + sync_statuses: dict[str, SyncStatus], + dry_run: bool = False, + only_syncs: list[str] | None = None, + progress: ProgressMode | None = None, + prune: bool = True, + on_rsync_output: Callable[[str], None] | None = None, + on_sync_start: Callable[[str], None] | None = None, + on_sync_end: Callable[[str, SyncResult], None] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> list[SyncResult]: + """Run all (or selected) syncs. + + Expects pre-computed sync statuses from ``check_all_syncs``. 
+ """ + + results: list[SyncResult] = [] + + from .ordering import sort_syncs + + selected = ( + {s: st for s, st in sync_statuses.items() if s in only_syncs} + if only_syncs + else sync_statuses + ) + + ordered_slugs = sort_syncs({s: config.syncs[s] for s in selected}) + + for slug in ordered_slugs: + status = selected[slug] + if on_sync_start: + on_sync_start(slug) + + if not status.active: + result = SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=-1, + output="", + error=( + "Sync not active: " + + ", ".join(r.value for r in status.reasons) + ), + ) + else: + result = _run_single_sync( + slug, + status, + config, + dry_run, + progress, + prune, + on_rsync_output, + resolved_endpoints, + ) + + results.append(result) + if on_sync_end: + on_sync_end(slug, result) + + return results + + +def _run_single_sync( + slug: str, + status: SyncStatus, + config: Config, + dry_run: bool, + progress: ProgressMode | None = None, + prune: bool = True, + on_rsync_output: Callable[[str], None] | None = None, + resolved_endpoints: ResolvedEndpoints | None = None, +) -> SyncResult: + """Run a single sync operation.""" + sync = status.config + + match sync.destination.snapshot_mode: + case "hard-link": + return _run_hard_link_sync( + slug, + sync, + config, + dry_run, + progress, + prune, + on_rsync_output, + resolved_endpoints, + ) + case "btrfs": + return _run_btrfs_sync( + slug, + sync, + config, + dry_run, + progress, + prune, + on_rsync_output, + resolved_endpoints, + ) + case _: + return _run_plain_sync( + slug, + sync, + config, + dry_run, + progress, + on_rsync_output, + resolved_endpoints, + ) + + +def _run_plain_sync( + slug: str, + sync: object, + config: Config, + dry_run: bool, + progress: ProgressMode | None, + on_rsync_output: Callable[[str], None] | None, + resolved_endpoints: ResolvedEndpoints | None, +) -> SyncResult: + """Run a sync with no snapshot strategy.""" + from ..config import SyncConfig + + assert isinstance(sync, 
SyncConfig) + try: + proc = run_rsync( + sync, + config, + dry_run=dry_run, + progress=progress, + on_output=on_rsync_output, + resolved_endpoints=resolved_endpoints, + dest_suffix=None, + ) + except Exception as e: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=-1, + output="", + error=str(e), + ) + + if proc.returncode != 0: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout + proc.stderr, + error=f"rsync exited with code {proc.returncode}", + ) + else: + return SyncResult( + sync_slug=slug, + success=True, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout, + ) + + +def _run_btrfs_sync( + slug: str, + sync: object, + config: Config, + dry_run: bool, + progress: ProgressMode | None, + prune: bool, + on_rsync_output: Callable[[str], None] | None, + resolved_endpoints: ResolvedEndpoints | None, +) -> SyncResult: + """Run a sync with btrfs snapshot strategy.""" + from ..config import SyncConfig + + assert isinstance(sync, SyncConfig) + try: + proc = run_rsync( + sync, + config, + dry_run=dry_run, + progress=progress, + on_output=on_rsync_output, + resolved_endpoints=resolved_endpoints, + dest_suffix="latest", + ) + except Exception as e: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=-1, + output="", + error=str(e), + ) + + if proc.returncode != 0: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout + proc.stderr, + error=f"rsync exited with code {proc.returncode}", + ) + else: + snapshot_path: str | None = None + pruned_paths: list[str] | None = None + btrfs_cfg = sync.destination.btrfs_snapshots + if not dry_run: + try: + snapshot_path = create_snapshot( + sync, + config, + resolved_endpoints=resolved_endpoints, + ) + except RuntimeError as e: + return SyncResult( + sync_slug=slug, + 
success=False, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout, + error=f"Snapshot failed: {e}", + ) + if prune and btrfs_cfg.max_snapshots is not None: + pruned_paths = btrfs_prune_snapshots( + sync, + config, + btrfs_cfg.max_snapshots, + resolved_endpoints=resolved_endpoints, + ) + + return SyncResult( + sync_slug=slug, + success=True, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout, + snapshot_path=snapshot_path, + pruned_paths=pruned_paths, + ) + + +def _run_hard_link_sync( + slug: str, + sync: object, + config: Config, + dry_run: bool, + progress: ProgressMode | None, + prune: bool, + on_rsync_output: Callable[[str], None] | None, + resolved_endpoints: ResolvedEndpoints | None, +) -> SyncResult: + """Run a sync with hard-link snapshot strategy.""" + from ..config import SyncConfig + + assert isinstance(sync, SyncConfig) + hl_cfg = sync.destination.hard_link_snapshots + + # 1. Clean up orphaned snapshots from failed syncs + try: + cleanup_orphaned_snapshots( + sync, config, resolved_endpoints=resolved_endpoints + ) + except Exception: + pass # Best-effort cleanup + + # 2. Determine link-dest from latest complete snapshot + link_dest: str | None = None + latest = get_latest_snapshot(sync, config, resolved_endpoints) + if latest: + prev_name = latest.rsplit("/", 1)[-1] + link_dest = f"../{prev_name}" + + # 3. Create new snapshot directory + try: + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved_endpoints + ) + except RuntimeError as e: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=-1, + output="", + error=f"Failed to create snapshot dir: {e}", + ) + snapshot_name = snapshot_path.rsplit("/", 1)[-1] + + # 4. 
Run rsync into the snapshot directory + try: + proc = run_rsync( + sync, + config, + dry_run=dry_run, + link_dest=link_dest, + progress=progress, + on_output=on_rsync_output, + resolved_endpoints=resolved_endpoints, + dest_suffix=f"snapshots/{snapshot_name}", + ) + except Exception as e: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=-1, + output="", + error=str(e), + ) + + if proc.returncode != 0: + # Clean up the empty snapshot dir on failure + _cleanup_snapshot_dir(snapshot_path, sync, config, resolved_endpoints) + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout + proc.stderr, + error=f"rsync exited with code {proc.returncode}", + ) + + # 5. Update latest symlink (skip on dry-run) + pruned_paths: list[str] | None = None + if not dry_run: + try: + update_latest_symlink( + sync, + config, + snapshot_name, + resolved_endpoints=resolved_endpoints, + ) + except RuntimeError as e: + return SyncResult( + sync_slug=slug, + success=False, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout, + error=f"Symlink update failed: {e}", + ) + + # 6. 
Prune old snapshots + if prune and hl_cfg.max_snapshots is not None: + pruned_paths = hl_prune_snapshots( + sync, + config, + hl_cfg.max_snapshots, + resolved_endpoints=resolved_endpoints, + ) + else: + # Dry-run: remove the empty snapshot dir + _cleanup_snapshot_dir(snapshot_path, sync, config, resolved_endpoints) + + return SyncResult( + sync_slug=slug, + success=True, + dry_run=dry_run, + rsync_exit_code=proc.returncode, + output=proc.stdout, + snapshot_path=snapshot_path if not dry_run else None, + pruned_paths=pruned_paths, + ) + + +def _cleanup_snapshot_dir( + snapshot_path: str, + sync: object, + config: Config, + resolved_endpoints: ResolvedEndpoints | None, +) -> None: + """Remove a snapshot directory (best-effort cleanup).""" + from ..config import LocalVolume, RemoteVolume, SyncConfig + + assert isinstance(sync, SyncConfig) + dst_vol = config.volumes[sync.destination.volume] + try: + match dst_vol: + case LocalVolume(): + shutil.rmtree(snapshot_path, ignore_errors=True) + case RemoteVolume(): + from ..remote import run_remote_command + + re = resolved_endpoints or {} + ep = re[dst_vol.slug] + run_remote_command( + ep.server, + ["rm", "-rf", snapshot_path], + ep.proxy_chain, + ) + except Exception: + pass # Best-effort cleanup diff --git a/nbkp/templates/__init__.py b/nbkp/templates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nbkp/templates/backup.sh.j2 b/nbkp/templates/backup.sh.j2 new file mode 100644 index 0000000..519289d --- /dev/null +++ b/nbkp/templates/backup.sh.j2 @@ -0,0 +1,143 @@ +#!/bin/bash +# Generated by nbkp sh — ${{ timestamp }} +${{ config_line }} +# +# Preserved from nbkp run: +# - All 4 rsync command variants (local→local, local→remote, remote→local, remote→remote) +# - SSH options (port, key, -o options, proxy jump -J) +# - Rsync filters and filter-file support +# - Btrfs: snapshot creation, pruning +# - Hard-link: incremental backups via --link-dest, symlink management, pruning +# - Pre-flight checks: volume 
sentinels (.nbkp-vol), endpoint sentinels (.nbkp-src/.nbkp-dst) +# - Dry-run and progress mode support (as runtime script arguments) +# - Nonzero exit on any sync failure +# +# Dropped from nbkp run: +# - Rich console output (spinners, tables, progress bars) → simple log messages +# - JSON output mode → not applicable for a shell script +# - Python runtime / config parsing → all values hardcoded +# - --strict/--no-strict flag → checks are inline, comment out if not needed +# - Paramiko-only SSH options (channel_timeout, disabled_algorithms) → no ssh CLI equivalent + +# Unofficial bash strict mode (http://redsymbol.net/articles/unofficial-bash-strict-mode/) +set -euo pipefail +IFS=$'\n\t' +<% if has_script_dir %> + +NBKP_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +<% endif %> + +NBKP_DRY_RUN=false +NBKP_PROGRESS="" + +# --- Argument parsing --- +while [ $# -gt 0 ]; do + case "$1" in + -n|--dry-run) NBKP_DRY_RUN=true; shift;; + --progress=*) NBKP_PROGRESS="${1#--progress=}"; shift;; + --progress) NBKP_PROGRESS="$2"; shift 2;; + -p) NBKP_PROGRESS="$2"; shift 2;; + *) echo "Unknown option: $1" >&2; exit 1;; + esac +done + +NBKP_FAILURES=0 +nbkp_log() { + if [ "$NBKP_DRY_RUN" = true ]; then + echo "[nbkp] [dry-run] $*" >&2 + else + echo "[nbkp] $*" >&2 + fi +} + +# --- Rsync runtime flags --- +RSYNC_DRY_RUN_FLAG="" +if [ "$NBKP_DRY_RUN" = true ]; then RSYNC_DRY_RUN_FLAG="--dry-run"; fi +RSYNC_PROGRESS_FLAGS="" +case "$NBKP_PROGRESS" in + none) ;; + overall) RSYNC_PROGRESS_FLAGS="--info=progress2 --stats --human-readable";; + per-file) RSYNC_PROGRESS_FLAGS="-v --progress --human-readable";; + full) RSYNC_PROGRESS_FLAGS="-v --progress --info=progress2 --stats --human-readable";; + "") ;; + *) echo "Unknown progress mode: $NBKP_PROGRESS (use 'none', 'overall', 'per-file', or 'full')" >&2; exit 1;; +esac + +# --- Volume checks --- +<% for check in volume_checks %> +${{ check }} +<% endfor %> + +# --- Sync functions --- +<% for sync in syncs %> +<% if 
sync.enabled %> + +${{ sync.fn_name }}() { + nbkp_log "Starting sync: ${{ sync.slug }}" + + # Pre-flight checks +${{ sync.preflight | indent(4, first=true) }} +<% if sync.has_hard_link %> + + # Cleanup orphaned snapshots +${{ sync.orphan_cleanup | indent(4, first=true) }} + + # Link-dest resolution (latest snapshot for incremental backup) +${{ sync.link_dest | indent(4, first=true) }} + + # Create snapshot directory +${{ sync.hl_mkdir | indent(4, first=true) }} +<% endif %> +<% if sync.has_btrfs %> + + # Link-dest resolution (latest snapshot for incremental backup) +${{ sync.link_dest | indent(4, first=true) }} +<% endif %> + + # Rsync +${{ sync.rsync | indent(4, first=true) }} +<% if sync.has_btrfs %> + + # Btrfs snapshot (skip if dry-run) +${{ sync.snapshot | indent(4, first=true) }} +<% if sync.has_prune %> + + # Prune old snapshots (max: ${{ sync.max_snapshots }}) +${{ sync.prune | indent(4, first=true) }} +<% endif %> +<% endif %> +<% if sync.has_hard_link %> + + # Update latest symlink (skip if dry-run) +${{ sync.symlink | indent(4, first=true) }} +<% if sync.has_prune %> + + # Prune old snapshots (max: ${{ sync.max_snapshots }}) +${{ sync.hl_prune | indent(4, first=true) }} +<% endif %> +<% endif %> + + nbkp_log "Completed sync: ${{ sync.slug }}" +} +<% else %> + +# : disabled — ${{ sync.slug }} +${{ sync.disabled_body }} +<% endif %> +<% endfor %> + +# --- Run all syncs --- +<% for sync in syncs %> +<% if sync.enabled %> +${{ sync.fn_name }} || NBKP_FAILURES=$((NBKP_FAILURES + 1)) +<% else %> +# ${{ sync.fn_name }} || NBKP_FAILURES=$((NBKP_FAILURES + 1)) # disabled +<% endif %> +<% endfor %> + +# --- Summary --- +if [ "$NBKP_FAILURES" -gt 0 ]; then + nbkp_log "$NBKP_FAILURES sync(s) failed" + exit 1 +fi +nbkp_log "All syncs completed successfully" diff --git a/nbkp/testcli.py b/nbkp/testcli.py new file mode 100644 index 0000000..2b45e1e --- /dev/null +++ b/nbkp/testcli.py @@ -0,0 +1,619 @@ +"""Developer test CLI: fake output rendering and seed data.""" + 
+from __future__ import annotations + +import tempfile +from io import StringIO +from pathlib import Path +from typing import Annotated + +import typer +import yaml +from pydantic import ValidationError +from rich.console import Console +from rich.panel import Panel +from rich.syntax import Syntax +from rich.text import Text + +from .config import ( + BtrfsSnapshotConfig, + Config, + ConfigError, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + RsyncOptions, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from .testkit.docker import ( + BASTION_CONTAINER_NAME, + CONTAINER_NAME, + DOCKER_DIR, + REMOTE_BACKUP_PATH, + REMOTE_BTRFS_PATH, + build_docker_image, + check_docker, + create_docker_network, + create_test_ssh_endpoint, + generate_ssh_keypair, + ssh_exec, + start_bastion_container, + start_docker_container, + wait_for_ssh, +) +from .config.protocol import Config as ConfigModel +from .config.resolution import resolve_all_endpoints +from .output import ( + print_config_error, + print_human_check, + print_human_config, + print_human_prune_results, + print_human_results, + print_human_troubleshoot, +) +from .testkit.gen.check import ( + check_config, + check_data, + troubleshoot_config, + troubleshoot_data, +) +from .testkit.gen.config import config_show_config +from .testkit.gen.fs import ( + create_seed_sentinels, + seed_volume, +) +from .testkit.gen.sync import ( + dry_run_result, + prune_dry_run_results, + prune_results, + run_results, +) + +_console = Console() + +app = typer.Typer( + name="nbkp-test", + help="NBKP developer test CLI", + no_args_is_help=True, +) + + +# ── Commands ───────────────────────────────────────────────────── + + +def _capture_console() -> tuple[Console, StringIO]: + """Create a Console that captures output to a StringIO buffer.""" + buf = StringIO() + console = Console( + file=buf, + force_terminal=True, + width=_console.width - 4, + ) + return console, buf + + +def _print_panel(title: str, buf: StringIO) -> None: + """Wrap 
captured console output in a titled panel.""" + content = Text.from_ansi(buf.getvalue().rstrip("\n")) + _console.print( + Panel( + content, + title=f"[bold]{title}[/bold]", + border_style="cyan", + padding=(0, 1), + ) + ) + + +@app.command() +def output() -> None: + """Render all human output functions with fake data.""" + _show_config_show() + _show_check() + _show_results() + _show_prune() + _show_troubleshoot() + _show_config_errors() + + +def _show_config_show() -> None: + console, buf = _capture_console() + config = config_show_config() + re = resolve_all_endpoints(config) + print_human_config(config, console=console, resolved_endpoints=re) + _print_panel("print_human_config", buf) + + +def _show_check() -> None: + console, buf = _capture_console() + config = check_config() + re = resolve_all_endpoints(config) + vol_statuses, sync_statuses = check_data(config) + print_human_check( + vol_statuses, + sync_statuses, + config, + console=console, + resolved_endpoints=re, + wrap_in_panel=False, + ) + _print_panel("print_human_check", buf) + + +def _show_results() -> None: + config = config_show_config() + console, buf = _capture_console() + print_human_results(run_results(config), dry_run=False, console=console) + _print_panel("print_human_results (run)", buf) + + console, buf = _capture_console() + print_human_results( + [dry_run_result(config)], dry_run=True, console=console + ) + _print_panel("print_human_results (dry run)", buf) + + +def _show_prune() -> None: + config = config_show_config() + console, buf = _capture_console() + print_human_prune_results( + prune_results(config), dry_run=False, console=console + ) + _print_panel("print_human_prune_results (prune)", buf) + + console, buf = _capture_console() + print_human_prune_results( + prune_dry_run_results(config), + dry_run=True, + console=console, + ) + _print_panel("print_human_prune_results (dry run)", buf) + + +def _show_troubleshoot() -> None: + console, buf = _capture_console() + config = 
troubleshoot_config() + re = resolve_all_endpoints(config) + vol_statuses, sync_statuses = troubleshoot_data(config) + print_human_troubleshoot( + vol_statuses, + sync_statuses, + config, + console=console, + resolved_endpoints=re, + ) + _print_panel("print_human_troubleshoot", buf) + + +def _show_config_errors() -> None: + console, buf = _capture_console() + print_config_error( + ConfigError("Config file not found: /etc/nbkp/config.yaml"), + console=console, + ) + _print_panel("print_config_error (file not found)", buf) + + console, buf = _capture_console() + try: + yaml.safe_load("not_a_list:\n - [invalid") + except yaml.YAMLError as ye: + err = ConfigError(f"Invalid YAML in /etc/nbkp/config.yaml: {ye}") + err.__cause__ = ye + print_config_error(err, console=console) + _print_panel("print_config_error (invalid YAML)", buf) + + console, buf = _capture_console() + try: + ConfigModel.model_validate( + {"volumes": {"v": {"type": "ftp", "path": "/x"}}} + ) + except ValidationError as ve: + err = ConfigError(str(ve)) + err.__cause__ = ve + print_config_error(err, console=console) + _print_panel("print_config_error (invalid volume type)", buf) + + console, buf = _capture_console() + try: + ConfigModel.model_validate( + { + "ssh-endpoints": {}, + "volumes": { + "v": { + "type": "remote", + "ssh-endpoint": "missing", + "path": "/x", + }, + }, + "syncs": {}, + } + ) + except ValidationError as ve: + err = ConfigError(str(ve)) + err.__cause__ = ve + print_config_error(err, console=console) + _print_panel("print_config_error (unknown server reference)", buf) + + console, buf = _capture_console() + try: + ConfigModel.model_validate( + {"volumes": {"v": {"type": "local"}}, "syncs": {}} + ) + except ValidationError as ve: + err = ConfigError(str(ve)) + err.__cause__ = ve + print_config_error(err, console=console) + _print_panel("print_config_error (missing required field)", buf) + + +@app.command() +def seed( + big_file_size: Annotated[ + int, + typer.Option( + 
"--big-file-size", + help="Size in MB for large files (e.g. 100, 1024)." + " When set, large files are written at this size" + " to slow down syncs." + " Set to 0 to disable.", + ), + ] = 1, + docker: Annotated[ + bool, + typer.Option( + "--docker", + help="Start a Docker container for remote syncs.", + ), + ] = False, + bandwidth_limit: Annotated[ + int, + typer.Option( + "--bandwidth-limit", + help="Rsync bandwidth limit in KiB/s" + " (e.g. 100 for ~100 KiB/s)." + " Set to 0 to disable.", + ), + ] = 250, +) -> None: + """Create a temp folder with config and test data.""" + rsync_opts = ( + RsyncOptions(extra_options=[f"--bwlimit={bandwidth_limit}"]) + if bandwidth_limit + else RsyncOptions() + ) + + if docker: + check_docker() + if not DOCKER_DIR.is_dir(): + typer.echo( + "Error: Docker directory not found:" f" {DOCKER_DIR}", + err=True, + ) + raise typer.Exit(1) + + tmp = Path(tempfile.mkdtemp(prefix="nbkp-seed-")) + + # Docker containers + docker_endpoint = None + bastion_endpoint = None + if docker: + private_key, pub_key = generate_ssh_keypair(tmp) + + with _console.status("Building Docker image..."): + build_docker_image() + + with _console.status("Creating Docker network..."): + network_name = create_docker_network() + + with _console.status("Starting bastion container..."): + bastion_port = start_bastion_container(pub_key, network_name) + bastion_endpoint = create_test_ssh_endpoint( + "bastion", "127.0.0.1", bastion_port, private_key + ) + with _console.status("Waiting for bastion SSH..."): + wait_for_ssh(bastion_endpoint) + + with _console.status("Starting Docker container..."): + docker_port = start_docker_container( + pub_key, + network_name=network_name, + network_alias="backup-server", + ) + docker_endpoint = create_test_ssh_endpoint( + "docker", "127.0.0.1", docker_port, private_key + ) + with _console.status("Waiting for SSH..."): + wait_for_ssh(docker_endpoint) + + # Config — chain layout matching integration test + hl_src = 
HardLinkSnapshotConfig(enabled=True) + hl_dst = HardLinkSnapshotConfig(enabled=True) + + ssh_endpoints: dict[str, SshEndpoint] = {} + volumes: dict[str, LocalVolume | RemoteVolume] = { + "src-local-bare": LocalVolume( + slug="src-local-bare", + path=str(tmp / "src-local-bare"), + ), + "stage-local-hl-snapshots": LocalVolume( + slug="stage-local-hl-snapshots", + path=str(tmp / "stage-local-hl-snapshots"), + ), + "dst-local-bare": LocalVolume( + slug="dst-local-bare", + path=str(tmp / "dst-local-bare"), + ), + } + syncs: dict[str, SyncConfig] = { + # local→local, HL destination + "step-1": SyncConfig( + slug="step-1", + source=SyncEndpoint(volume="src-local-bare"), + destination=SyncEndpoint( + volume="stage-local-hl-snapshots", + hard_link_snapshots=hl_dst, + ), + rsync_options=rsync_opts, + ), + } + + if docker: + assert docker_endpoint is not None + assert bastion_endpoint is not None + btrfs_snapshots_path = f"{REMOTE_BTRFS_PATH}/snapshots" + btrfs_bare_path = f"{REMOTE_BTRFS_PATH}/bare" + btrfs_dst = BtrfsSnapshotConfig(enabled=True) + btrfs_src = BtrfsSnapshotConfig(enabled=True) + + ssh_endpoints["bastion"] = bastion_endpoint + ssh_endpoints["docker"] = docker_endpoint + ssh_endpoints["via-bastion"] = create_test_ssh_endpoint( + "via-bastion", + "backup-server", + 22, + private_key, + proxy_jump="bastion", + ) + volumes.update( + { + "stage-remote-bare": RemoteVolume( + slug="stage-remote-bare", + ssh_endpoint="via-bastion", + path=f"{REMOTE_BACKUP_PATH}/bare", + ), + "stage-remote-btrfs-snapshots": RemoteVolume( + slug="stage-remote-btrfs-snapshots", + ssh_endpoint="via-bastion", + path=btrfs_snapshots_path, + ), + "stage-remote-btrfs-bare": RemoteVolume( + slug="stage-remote-btrfs-bare", + ssh_endpoint="via-bastion", + path=btrfs_bare_path, + ), + "stage-remote-hl-snapshots": RemoteVolume( + slug="stage-remote-hl-snapshots", + ssh_endpoint="via-bastion", + path=f"{REMOTE_BACKUP_PATH}/hl", + ), + } + ) + syncs.update( + { + # local→remote (bastion), bare dest 
+ "step-2": SyncConfig( + slug="step-2", + source=SyncEndpoint( + volume="stage-local-hl-snapshots", + hard_link_snapshots=hl_src, + ), + destination=SyncEndpoint( + volume="stage-remote-bare", + ), + rsync_options=rsync_opts, + ), + # remote→remote (bastion), btrfs dest + "step-3": SyncConfig( + slug="step-3", + source=SyncEndpoint( + volume="stage-remote-bare", + ), + destination=SyncEndpoint( + volume=("stage-remote-btrfs-snapshots"), + btrfs_snapshots=btrfs_dst, + ), + rsync_options=rsync_opts, + ), + # remote→remote (bastion), bare on btrfs + "step-4": SyncConfig( + slug="step-4", + source=SyncEndpoint( + volume=("stage-remote-btrfs-snapshots"), + btrfs_snapshots=btrfs_src, + ), + destination=SyncEndpoint( + volume="stage-remote-btrfs-bare", + ), + rsync_options=rsync_opts, + ), + # remote→remote (bastion), HL dest + "step-5": SyncConfig( + slug="step-5", + source=SyncEndpoint( + volume="stage-remote-btrfs-bare", + ), + destination=SyncEndpoint( + volume=("stage-remote-hl-snapshots"), + hard_link_snapshots=hl_dst, + ), + rsync_options=rsync_opts, + ), + # remote (bastion)→local, bare dest + "step-6": SyncConfig( + slug="step-6", + source=SyncEndpoint( + volume=("stage-remote-hl-snapshots"), + hard_link_snapshots=hl_src, + ), + destination=SyncEndpoint( + volume="dst-local-bare", + ), + rsync_options=rsync_opts, + ), + } + ) + else: + # Local-only: step-2 goes directly to dst + syncs["step-2"] = SyncConfig( + slug="step-2", + source=SyncEndpoint( + volume="stage-local-hl-snapshots", + hard_link_snapshots=hl_src, + ), + destination=SyncEndpoint( + volume="dst-local-bare", + ), + rsync_options=rsync_opts, + ) + + config = Config( + ssh_endpoints=ssh_endpoints, + volumes=volumes, + syncs=syncs, + ) + + remote_exec = None + # Create sentinels and seed data + size_bytes = big_file_size * 1024 * 1024 + if docker: + assert docker_endpoint is not None + _server = docker_endpoint + + def _run_remote(cmd: str) -> None: + ssh_exec(_server, cmd) + + with 
_console.status("Creating btrfs subvolume..."): + ssh_exec( + docker_endpoint, + "btrfs subvolume create" f" {btrfs_snapshots_path}", + ) + remote_exec = _run_remote + + with _console.status("Setting up volumes..."): + create_seed_sentinels(config, remote_exec=remote_exec) + seed_volume( + config.volumes["src-local-bare"], + big_file_size_bytes=size_bytes, + ) + + config_path = tmp / "config.yaml" + config_path.write_text( + yaml.safe_dump( + config.model_dump(by_alias=True), + default_flow_style=False, + sort_keys=False, + ) + ) + + backup_sh = tmp / "backup.sh" + + # Print summary + rows: list[tuple[str, str]] = [ + ("Seed directory", str(tmp)), + ("Config file", str(config_path)), + ] + if docker: + assert docker_endpoint is not None + assert bastion_endpoint is not None + rows.append( + ( + "Bastion", + f"{BASTION_CONTAINER_NAME}" f" (port {bastion_endpoint.port})", + ) + ) + rows.append( + ( + "Docker", + f"{CONTAINER_NAME}" f" (port {docker_endpoint.port})", + ) + ) + label_w = max(len(r[0]) for r in rows) + summary = Text() + for i, (label, value) in enumerate(rows): + if i > 0: + summary.append("\n") + summary.append(f"{label:<{label_w}} ", style="bold") + summary.append(value) + _console.print(Panel(summary, border_style="blue", padding=(0, 1))) + + lines = [ + f'CFG="{config_path}"', + f'SH="{backup_sh}"', + "", + "# Show parsed configuration", + "poetry run nbkp config show --config $CFG", + "", + "# Show configuration as JSON", + "poetry run nbkp config show --config $CFG --output json", + "", + "# Volume and sync health checks", + "poetry run nbkp check --config $CFG", + "", + "# Preview what rsync would do without changes", + "poetry run nbkp run --config $CFG --dry-run", + "", + "# Execute backup syncs", + "poetry run nbkp run --config $CFG", + "", + "# Prune old btrfs snapshots", + "poetry run nbkp prune --config $CFG", + "", + "# Generate standalone bash script to stdout", + "poetry run nbkp sh --config $CFG", + "", + "# Write script to file, 
validate, and run", + "poetry run nbkp sh --config $CFG -o $SH \\", + " && bash -n $SH \\", + " && $SH --dry-run \\", + " && $SH", + "", + "# With relative paths (src and dst)", + "poetry run nbkp sh --config $CFG -o $SH" + " --relative-src --relative-dst \\", + " && bash -n $SH \\", + " && $SH --dry-run \\", + " && $SH", + ] + if docker: + lines += [ + "", + "# Teardown Docker containers and network", + f"docker rm -f {CONTAINER_NAME}" f" {BASTION_CONTAINER_NAME}", + "docker network rm nbkp-seed-net", + ] + commands = "\n".join(lines) + _console.print( + Panel( + Syntax( + commands, + "bash", + theme="monokai", + background_color="default", + word_wrap=True, + ), + title="[bold]Try[/bold]", + border_style="green", + padding=(0, 1), + ) + ) + + +def main() -> None: + """Test CLI entry point.""" + app() + + +if __name__ == "__main__": + main() diff --git a/nbkp/testkit/__init__.py b/nbkp/testkit/__init__.py new file mode 100644 index 0000000..769f8f8 --- /dev/null +++ b/nbkp/testkit/__init__.py @@ -0,0 +1 @@ +"""Developer test kit: seed data, Docker helpers, fake outputs.""" diff --git a/nbkp/testkit/docker.py b/nbkp/testkit/docker.py new file mode 100644 index 0000000..a9e3291 --- /dev/null +++ b/nbkp/testkit/docker.py @@ -0,0 +1,309 @@ +"""Docker helpers for the developer test CLI seed command.""" + +from __future__ import annotations + +import socket +import subprocess +import time +from pathlib import Path + +import docker as dockerlib +import typer + +from ..config import SshConnectionOptions, SshEndpoint +from ..remote.fabricssh import run_remote_command + +DOCKER_DIR = Path(__file__).resolve().parent / "dockerbuild" +CONTAINER_NAME = "nbkp-seed" +BASTION_CONTAINER_NAME = "nbkp-seed-bastion" +_IMAGE_TAG = "nbkp-seed-server:latest" +_NETWORK_NAME = "nbkp-seed-net" + +# ── Standard remote paths inside the test container ────────── + +REMOTE_BACKUP_PATH = "/srv/backups" +REMOTE_BTRFS_PATH = "/srv/btrfs-backups" +SSH_AUTHORIZED_KEYS_PATH = 
"/mnt/ssh-authorized-keys" + + +# ── SSH endpoint factory ───────────────────────────────────── + + +def create_test_ssh_endpoint( + slug: str, + host: str, + port: int, + private_key: Path, + *, + proxy_jump: str | None = None, +) -> SshEndpoint: + """Create an SshEndpoint with standard test connection options. + + All test containers use ``testuser``, disabled host-key checking, + and ``/dev/null`` as the known-hosts file. + """ + return SshEndpoint( + slug=slug, + host=host, + port=port, + user="testuser", + key=str(private_key), + proxy_jump=proxy_jump, + connection_options=SshConnectionOptions( + strict_host_key_checking=False, + known_hosts_file="/dev/null", + ), + ) + + +# ── Docker lifecycle ───────────────────────────────────────── + + +def check_docker() -> None: + """Verify Docker daemon is reachable.""" + try: + client = dockerlib.from_env() + client.ping() + except dockerlib.errors.DockerException as exc: + typer.echo( + f"Error: Docker is not available: {exc}", + err=True, + ) + raise typer.Exit(1) + + +def generate_ssh_keypair( + seed_dir: Path, +) -> tuple[Path, Path]: + """Generate Ed25519 SSH key pair in seed_dir/ssh/.""" + from cryptography.hazmat.primitives import serialization + from cryptography.hazmat.primitives.asymmetric.ed25519 import ( + Ed25519PrivateKey, + ) + + ssh_dir = seed_dir / "ssh" + ssh_dir.mkdir() + private_key_path = ssh_dir / "id_ed25519" + public_key_path = ssh_dir / "id_ed25519.pub" + + key = Ed25519PrivateKey.generate() + private_key_path.write_bytes( + key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.OpenSSH, + serialization.NoEncryption(), + ) + ) + private_key_path.chmod(0o600) + pub_bytes = key.public_key().public_bytes( + serialization.Encoding.OpenSSH, + serialization.PublicFormat.OpenSSH, + ) + public_key_path.write_text(f"{pub_bytes.decode()} nbkp-seed\n") + + return private_key_path, public_key_path + + +def create_docker_network() -> str: + """Create a Docker bridge network for 
container communication.""" + client = dockerlib.from_env() + try: + old = client.networks.get(_NETWORK_NAME) + old.reload() + for cid in list(old.attrs.get("Containers") or {}): + try: + old.disconnect(cid, force=True) + except dockerlib.errors.APIError: + pass + old.remove() + except dockerlib.errors.NotFound: + pass + client.networks.create(_NETWORK_NAME, driver="bridge") + return _NETWORK_NAME + + +def remove_docker_network() -> None: + """Remove the Docker bridge network.""" + client = dockerlib.from_env() + try: + network = client.networks.get(_NETWORK_NAME) + network.remove() + except dockerlib.errors.NotFound: + pass + + +def build_docker_image() -> None: + """Build the Docker image used by all seed containers.""" + client = dockerlib.from_env() + try: + client.images.build( + path=str(DOCKER_DIR), + tag=_IMAGE_TAG, + nocache=True, + ) + except dockerlib.errors.BuildError as exc: + typer.echo( + f"Error: Docker image build failed: {exc}", + err=True, + ) + raise typer.Exit(1) + + +def start_docker_container( + pub_key: Path, + network_name: str | None = None, + network_alias: str | None = None, +) -> int: + """Destroy old container, start new. Return SSH port. + + The image must already be built via build_docker_image(). 
+ """ + client = dockerlib.from_env() + + # Remove existing container if any + try: + old = client.containers.get(CONTAINER_NAME) + old.remove(force=True) + except dockerlib.errors.NotFound: + pass + + # Start container + container = client.containers.run( + _IMAGE_TAG, + detach=True, + name=CONTAINER_NAME, + privileged=True, + ports={"22/tcp": None}, + volumes={ + str(pub_key): { + "bind": SSH_AUTHORIZED_KEYS_PATH, + "mode": "ro", + } + }, + ) + + if network_name is not None: + network = client.networks.get(network_name) + aliases = [network_alias] if network_alias else None + network.connect(container, aliases=aliases) + + # Get mapped port + container.reload() + port_info = container.attrs["NetworkSettings"]["Ports"]["22/tcp"] + return int(port_info[0]["HostPort"]) + + +def start_bastion_container( + pub_key: Path, + network_name: str, +) -> int: + """Start a bastion (jump proxy) container. Return SSH port.""" + client = dockerlib.from_env() + + # Remove existing container if any + try: + old = client.containers.get(BASTION_CONTAINER_NAME) + old.remove(force=True) + except dockerlib.errors.NotFound: + pass + + container = client.containers.run( + _IMAGE_TAG, + detach=True, + name=BASTION_CONTAINER_NAME, + ports={"22/tcp": None}, + environment={"NBKP_BASTION_ONLY": "1"}, + volumes={ + str(pub_key): { + "bind": SSH_AUTHORIZED_KEYS_PATH, + "mode": "ro", + } + }, + ) + + network = client.networks.get(network_name) + network.connect(container) + + # Get mapped port + container.reload() + port_info = container.attrs["NetworkSettings"]["Ports"]["22/tcp"] + return int(port_info[0]["HostPort"]) + + +def wait_for_ssh( + server: SshEndpoint, + timeout: int = 30, +) -> None: + """Poll SSH until the daemon sends its banner.""" + deadline = time.time() + timeout + while time.time() < deadline: + try: + with socket.create_connection( + (server.host, server.port), timeout=2 + ) as sock: + data = sock.recv(256) + if data.startswith(b"SSH-"): + return + except OSError: + pass + 
time.sleep(1) + raise TimeoutError(f"SSH not ready after {timeout}s") + + +# ── Remote command helpers ─────────────────────────────────── + + +def ssh_exec( + server: SshEndpoint, + command: str, + *, + check: bool = True, +) -> subprocess.CompletedProcess[str]: + """Run a shell command on the container via SSH.""" + result = run_remote_command(server, ["sh", "-c", command]) + if check and result.returncode != 0: + raise subprocess.CalledProcessError( + result.returncode, + command, + result.stdout, + result.stderr, + ) + return result + + +def create_sentinels( + server: SshEndpoint, + path: str, + sentinels: list[str], +) -> None: + """Create sentinel files on the container via SSH.""" + for sentinel in sentinels: + result = ssh_exec(server, f"touch {path}/{sentinel}", check=False) + if result.returncode != 0: + raise RuntimeError( + f"Failed to create sentinel {sentinel}:" f" {result.stderr}" + ) + + +def prepare_btrfs_snapshot_based_backup_dst( + server: SshEndpoint, + path: str, +) -> None: + """Create btrfs destination structure. + + Creates the ``latest`` btrfs subvolume and the ``snapshots`` + directory under *path*. + """ + ssh_exec(server, f"btrfs subvolume create {path}/latest") + ssh_exec(server, f"mkdir -p {path}/snapshots") + + +def prepare_hardlinks_snapshot_based_backup_dst( + server: SshEndpoint, + path: str, +) -> None: + """Create hard-link destination structure. + + Creates the ``snapshots`` directory under *path*. 
+ """ + ssh_exec(server, f"mkdir -p {path}/snapshots") diff --git a/nbkp/testkit/dockerbuild/Dockerfile b/nbkp/testkit/dockerbuild/Dockerfile new file mode 100644 index 0000000..75c8c66 --- /dev/null +++ b/nbkp/testkit/dockerbuild/Dockerfile @@ -0,0 +1,22 @@ +FROM debian:latest + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + openssh-server \ + rsync \ + btrfs-progs \ + && rm -rf /var/lib/apt/lists/* + +RUN useradd -m -s /bin/bash testuser && \ + mkdir -p /home/testuser/.ssh && \ + chmod 700 /home/testuser/.ssh && \ + chown testuser:testuser /home/testuser/.ssh + +RUN mkdir -p /run/sshd + +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +EXPOSE 22 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/nbkp/testkit/dockerbuild/entrypoint.sh b/nbkp/testkit/dockerbuild/entrypoint.sh new file mode 100644 index 0000000..0b47fb6 --- /dev/null +++ b/nbkp/testkit/dockerbuild/entrypoint.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -e + +# Set up authorized keys from mounted file +if [ -f /mnt/ssh-authorized-keys ]; then + cp /mnt/ssh-authorized-keys /home/testuser/.ssh/authorized_keys + chmod 600 /home/testuser/.ssh/authorized_keys + chown testuser:testuser /home/testuser/.ssh/authorized_keys +fi + +if [ -z "$NBKP_BASTION_ONLY" ]; then + # Create btrfs filesystem on a file-backed image + truncate -s 4G /srv/btrfs-backups.img + mkfs.btrfs -f /srv/btrfs-backups.img + mkdir -p /srv/btrfs-backups + mount -o user_subvol_rm_allowed /srv/btrfs-backups.img /srv/btrfs-backups + + # Create base directories + mkdir -p /srv/backups + + # Set ownership + chown -R testuser:testuser /srv/backups + chown -R testuser:testuser /srv/btrfs-backups +fi + +# Generate SSH host keys if not present +ssh-keygen -A + +# Start sshd in foreground +exec /usr/sbin/sshd -D -e diff --git a/nbkp/testkit/gen/__init__.py b/nbkp/testkit/gen/__init__.py new file mode 100644 index 0000000..9d24f65 --- /dev/null +++ b/nbkp/testkit/gen/__init__.py @@ -0,0 +1 @@ +"""Fake data generators 
for manual testing and output validation.""" diff --git a/nbkp/testkit/gen/check.py b/nbkp/testkit/gen/check.py new file mode 100644 index 0000000..d874971 --- /dev/null +++ b/nbkp/testkit/gen/check.py @@ -0,0 +1,314 @@ +"""Fake check/troubleshoot data for manual testing.""" + +from __future__ import annotations + +from ...check import ( + SyncReason, + SyncStatus, + VolumeReason, + VolumeStatus, +) +from ...config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + SyncConfig, + SyncEndpoint, +) +from .config import ( + base_ssh_endpoints, + base_syncs, + base_volumes, +) + + +def check_config() -> Config: + """Config with local + remote volumes and varied syncs.""" + volumes = base_volumes() + volumes["external-drive"] = LocalVolume( + slug="external-drive", path="/mnt/external" + ) + syncs = base_syncs() + syncs["disabled-backup"] = SyncConfig( + slug="disabled-backup", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="external-drive", + ), + enabled=False, + ) + return Config( + ssh_endpoints=base_ssh_endpoints(), + volumes=volumes, + syncs=syncs, + ) + + +def check_data( + config: Config, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + """Volume and sync statuses with mixed active/inactive.""" + laptop_vs = VolumeStatus( + slug="laptop", + config=config.volumes["laptop"], + reasons=[], + ) + usb_vs = VolumeStatus( + slug="usb-drive", + config=config.volumes["usb-drive"], + reasons=[], + ) + nas_vs = VolumeStatus( + slug="nas-backup", + config=config.volumes["nas-backup"], + reasons=[VolumeReason.UNREACHABLE], + ) + external_vs = VolumeStatus( + slug="external-drive", + config=config.volumes["external-drive"], + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ) + + vol_statuses = { + "laptop": laptop_vs, + "usb-drive": usb_vs, + "nas-backup": nas_vs, + "external-drive": external_vs, + } + + sync_statuses = { + "photos-to-usb": SyncStatus( + 
slug="photos-to-usb", + config=config.syncs["photos-to-usb"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[], + ), + "docs-to-nas": SyncStatus( + slug="docs-to-nas", + config=config.syncs["docs-to-nas"], + source_status=laptop_vs, + destination_status=nas_vs, + reasons=[SyncReason.DESTINATION_UNAVAILABLE], + ), + "music-to-usb": SyncStatus( + slug="music-to-usb", + config=config.syncs["music-to-usb"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[], + ), + "disabled-backup": SyncStatus( + slug="disabled-backup", + config=config.syncs["disabled-backup"], + source_status=laptop_vs, + destination_status=external_vs, + reasons=[SyncReason.DISABLED], + ), + } + + return vol_statuses, sync_statuses + + +def troubleshoot_config() -> Config: + """Config designed to trigger every troubleshoot reason.""" + return Config( + ssh_endpoints=base_ssh_endpoints(), + volumes=base_volumes(), + syncs={ + "disabled-sync": SyncConfig( + slug="disabled-sync", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + ), + enabled=False, + ), + "unavailable-volumes": SyncConfig( + slug="unavailable-volumes", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="nas-backup", + ), + ), + "missing-sentinels": SyncConfig( + slug="missing-sentinels", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint(volume="usb-drive"), + ), + "rsync-missing": SyncConfig( + slug="rsync-missing", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint(volume="nas-backup"), + ), + "btrfs-not-detected": SyncConfig( + slug="btrfs-not-detected", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ), + "btrfs-mount-issues": SyncConfig( + slug="btrfs-mount-issues", + source=SyncEndpoint(volume="laptop"), + 
destination=DestinationSyncEndpoint( + volume="nas-backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ), + "tools-missing": SyncConfig( + slug="tools-missing", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ), + "hardlink-issues": SyncConfig( + slug="hardlink-issues", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=5 + ), + ), + ), + "source-latest-missing": SyncConfig( + slug="source-latest-missing", + source=SyncEndpoint( + volume="usb-drive", + btrfs_snapshots=BtrfsSnapshotConfig( + enabled=True, + ), + ), + destination=DestinationSyncEndpoint( + volume="nas-backup", + ), + ), + }, + ) + + +def troubleshoot_data( + config: Config, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + """Statuses covering every VolumeReason and SyncReason.""" + laptop_vs = VolumeStatus( + slug="laptop", + config=config.volumes["laptop"], + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ) + usb_vs = VolumeStatus( + slug="usb-drive", + config=config.volumes["usb-drive"], + reasons=[], + ) + nas_vs = VolumeStatus( + slug="nas-backup", + config=config.volumes["nas-backup"], + reasons=[VolumeReason.UNREACHABLE], + ) + + vol_statuses = { + "laptop": laptop_vs, + "usb-drive": usb_vs, + "nas-backup": nas_vs, + } + + sync_statuses = { + "disabled-sync": SyncStatus( + slug="disabled-sync", + config=config.syncs["disabled-sync"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[SyncReason.DISABLED], + ), + "unavailable-volumes": SyncStatus( + slug="unavailable-volumes", + config=config.syncs["unavailable-volumes"], + source_status=laptop_vs, + destination_status=nas_vs, + reasons=[ + SyncReason.SOURCE_UNAVAILABLE, + SyncReason.DESTINATION_UNAVAILABLE, + ], + ), + "missing-sentinels": SyncStatus( + 
slug="missing-sentinels", + config=config.syncs["missing-sentinels"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[ + SyncReason.SOURCE_SENTINEL_NOT_FOUND, + SyncReason.DESTINATION_SENTINEL_NOT_FOUND, + ], + ), + "rsync-missing": SyncStatus( + slug="rsync-missing", + config=config.syncs["rsync-missing"], + source_status=laptop_vs, + destination_status=nas_vs, + reasons=[ + SyncReason.RSYNC_NOT_FOUND_ON_SOURCE, + SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION, + ], + ), + "btrfs-not-detected": SyncStatus( + slug="btrfs-not-detected", + config=config.syncs["btrfs-not-detected"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[ + SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION, + SyncReason.DESTINATION_NOT_BTRFS, + SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME, + ], + ), + "btrfs-mount-issues": SyncStatus( + slug="btrfs-mount-issues", + config=config.syncs["btrfs-mount-issues"], + source_status=laptop_vs, + destination_status=nas_vs, + reasons=[ + SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM, + SyncReason.DESTINATION_LATEST_NOT_FOUND, + SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND, + ], + ), + "tools-missing": SyncStatus( + slug="tools-missing", + config=config.syncs["tools-missing"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[ + SyncReason.STAT_NOT_FOUND_ON_DESTINATION, + SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION, + ], + ), + "hardlink-issues": SyncStatus( + slug="hardlink-issues", + config=config.syncs["hardlink-issues"], + source_status=laptop_vs, + destination_status=usb_vs, + reasons=[ + SyncReason.DESTINATION_NO_HARDLINK_SUPPORT, + SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND, + ], + ), + "source-latest-missing": SyncStatus( + slug="source-latest-missing", + config=config.syncs["source-latest-missing"], + source_status=usb_vs, + destination_status=nas_vs, + reasons=[ + SyncReason.SOURCE_LATEST_NOT_FOUND, + SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND, + ], + ), + } + + return vol_statuses, sync_statuses 
diff --git a/nbkp/testkit/gen/config.py b/nbkp/testkit/gen/config.py new file mode 100644 index 0000000..f4c4a81 --- /dev/null +++ b/nbkp/testkit/gen/config.py @@ -0,0 +1,127 @@ +"""Fake config builders for manual testing.""" + +from __future__ import annotations + +from ...config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) + + +def bastion_server() -> SshEndpoint: + return SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + + +def bastion2_server() -> SshEndpoint: + return SshEndpoint( + slug="bastion2", + host="bastion2.internal", + user="admin", + ) + + +def nas_server() -> SshEndpoint: + return SshEndpoint( + slug="nas", + host="nas.example.com", + port=5022, + user="backup", + key="~/.ssh/nas_ed25519", + proxy_jumps=["bastion", "bastion2"], + locations=["home", "travel"], + ) + + +def nas_public_server() -> SshEndpoint: + return SshEndpoint( + slug="nas-public", + host="nas.public.example.com", + port=5022, + user="backup", + key="~/.ssh/nas_ed25519", + location="travel", + ) + + +def base_volumes() -> dict[str, LocalVolume | RemoteVolume]: + return { + "laptop": LocalVolume(slug="laptop", path="/mnt/data"), + "usb-drive": LocalVolume(slug="usb-drive", path="/mnt/usb-backup"), + "nas-backup": RemoteVolume( + slug="nas-backup", + ssh_endpoint="nas", + ssh_endpoints=["nas", "nas-public"], + path="/volume1/backups", + ), + } + + +def base_ssh_endpoints() -> dict[str, SshEndpoint]: + return { + "bastion": bastion_server(), + "bastion2": bastion2_server(), + "nas": nas_server(), + "nas-public": nas_public_server(), + } + + +def base_syncs() -> dict[str, SyncConfig]: + return { + "photos-to-usb": SyncConfig( + slug="photos-to-usb", + source=SyncEndpoint(volume="laptop", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + btrfs_snapshots=BtrfsSnapshotConfig( + enabled=True, 
max_snapshots=10 + ), + ), + filters=["+ *.jpg", "- *.tmp"], + ), + "docs-to-nas": SyncConfig( + slug="docs-to-nas", + source=SyncEndpoint(volume="laptop", subdir="documents"), + destination=DestinationSyncEndpoint( + volume="nas-backup", + subdir="docs", + ), + ), + "music-to-usb": SyncConfig( + slug="music-to-usb", + source=SyncEndpoint(volume="laptop", subdir="music"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=5 + ), + ), + ), + "disabled-backup": SyncConfig( + slug="disabled-backup", + source=SyncEndpoint(volume="laptop"), + destination=DestinationSyncEndpoint( + volume="usb-drive", + ), + enabled=False, + ), + } + + +def config_show_config() -> Config: + """Config exercising all display paths for config show.""" + return Config( + ssh_endpoints=base_ssh_endpoints(), + volumes=base_volumes(), + syncs=base_syncs(), + ) diff --git a/nbkp/testkit/gen/fs.py b/nbkp/testkit/gen/fs.py new file mode 100644 index 0000000..d828bf1 --- /dev/null +++ b/nbkp/testkit/gen/fs.py @@ -0,0 +1,258 @@ +"""Seed filesystem helpers: sentinels and sample data.""" + +from __future__ import annotations + +import shlex +import subprocess +from collections.abc import Callable +from pathlib import Path + +from ...config import ( + Config, + LocalVolume, + RemoteVolume, + SyncConfig, +) + +_CHUNK_SIZE = 1024 * 1024 # 1 MB + +_SAMPLE_FILES = [ + ("sample.txt", "Sample data for backup testing\n"), + ("photo.jpg", "fake jpeg data\n"), + ("document.pdf", "fake pdf data\n"), +] + + +def _write_zeroed_file(path: Path, size_bytes: int) -> None: + """Write a zeroed file in chunks to avoid large allocs.""" + chunk = b"\x00" * min(_CHUNK_SIZE, size_bytes) + with path.open("wb") as f: + remaining = size_bytes + while remaining > 0: + f.write(chunk[:remaining]) + remaining -= len(chunk) + + +def create_seed_sentinels( + config: Config, + remote_exec: Callable[[str], None] | None = None, +) -> None: + """Create 
volume, source, and destination sentinels. + + For local volumes, creates directories and sentinel files + directly. For remote volumes, uses *remote_exec(command)* + to run shell commands on the remote host. + """ + # Volume sentinels (.nbkp-vol) + for vol in config.volumes.values(): + match vol: + case LocalVolume(): + vol_path = Path(vol.path) + vol_path.mkdir(parents=True, exist_ok=True) + (vol_path / ".nbkp-vol").touch() + case RemoteVolume(): + if remote_exec is not None: + remote_exec(f"mkdir -p {vol.path}") + remote_exec(f"touch {vol.path}/.nbkp-vol") + + # Sync endpoint sentinels + for sync in config.syncs.values(): + _create_source_sentinels(config, sync, remote_exec) + _create_dest_sentinels(config, sync, remote_exec) + + +_SEED_SNAPSHOT_NAME = "1970-01-01T00:00:00.000Z" + + +def _create_source_sentinels( + config: Config, + sync: SyncConfig, + remote_exec: Callable[[str], None] | None, +) -> None: + vol = config.volumes[sync.source.volume] + subdir = sync.source.subdir + btrfs = sync.source.btrfs_snapshots + hard_link = sync.source.hard_link_snapshots + + match vol: + case LocalVolume(): + path = Path(vol.path) + if subdir: + path = path / subdir + path.mkdir(parents=True, exist_ok=True) + (path / ".nbkp-src").touch() + if hard_link.enabled: + snap = path / "snapshots" / _SEED_SNAPSHOT_NAME + snap.mkdir(parents=True, exist_ok=True) + latest = path / "latest" + if not latest.exists(): + latest.symlink_to(f"snapshots/{_SEED_SNAPSHOT_NAME}") + elif btrfs.enabled: + (path / "snapshots").mkdir(exist_ok=True) + if not (path / "latest").exists(): + subprocess.run( + [ + "btrfs", + "subvolume", + "create", + str(path / "latest"), + ], + check=True, + ) + case RemoteVolume(): + if remote_exec is not None: + rp = vol.path + if subdir: + rp = f"{rp}/{subdir}" + remote_exec(f"mkdir -p {rp}") + remote_exec(f"touch {rp}/.nbkp-src") + if hard_link.enabled: + snap_rel = f"snapshots/{_SEED_SNAPSHOT_NAME}" + remote_exec(f"mkdir -p {rp}/{snap_rel}") + remote_exec( + 
f"test -e {rp}/latest" + f" || ln -sfn {snap_rel} {rp}/latest" + ) + elif btrfs.enabled: + remote_exec( + f"test -e {rp}/latest" + " || btrfs subvolume create" + f" {rp}/latest" + ) + remote_exec(f"mkdir -p {rp}/snapshots") + + +def _create_dest_sentinels( + config: Config, + sync: SyncConfig, + remote_exec: Callable[[str], None] | None, +) -> None: + vol = config.volumes[sync.destination.volume] + subdir = sync.destination.subdir + btrfs = sync.destination.btrfs_snapshots + hard_link = sync.destination.hard_link_snapshots + + match vol: + case LocalVolume(): + path = Path(vol.path) + if subdir: + path = path / subdir + path.mkdir(parents=True, exist_ok=True) + (path / ".nbkp-dst").touch() + if hard_link.enabled: + (path / "snapshots").mkdir(exist_ok=True) + elif btrfs.enabled: + if not (path / "latest").exists(): + subprocess.run( + [ + "btrfs", + "subvolume", + "create", + str(path / "latest"), + ], + check=True, + ) + (path / "snapshots").mkdir(exist_ok=True) + case RemoteVolume(): + if remote_exec is not None: + rp = vol.path + if subdir: + rp = f"{rp}/{subdir}" + remote_exec(f"mkdir -p {rp}") + remote_exec(f"touch {rp}/.nbkp-dst") + if hard_link.enabled: + remote_exec(f"mkdir -p {rp}/snapshots") + elif btrfs.enabled: + remote_exec( + f"test -e {rp}/latest" + " || btrfs subvolume create" + f" {rp}/latest" + ) + remote_exec(f"mkdir -p {rp}/snapshots") + + +def _volume_key( + vol: LocalVolume | RemoteVolume, + subdir: str | None, +) -> str: + """Return a dedup key for a volume + subdir combination.""" + match vol: + case LocalVolume(): + base = Path(vol.path) + return str(base / subdir if subdir else base) + case RemoteVolume(): + rp = vol.path + if subdir: + rp = f"{rp}/{subdir}" + return rp + + +def create_seed_data( + config: Config, + big_file_size_mb: int = 0, + remote_exec: Callable[[str], None] | None = None, +) -> None: + """Generate sample files in source volumes. + + Creates a handful of small files in each unique source + path. 
When *big_file_size_mb* > 0, an additional large + zeroed file is written to slow down syncs for manual + testing. + + For remote source volumes, uses *remote_exec(command)* + to create files on the remote host. + """ + size_bytes = big_file_size_mb * 1024 * 1024 + + unique_sources = { + _volume_key(config.volumes[s.source.volume], s.source.subdir): ( + config.volumes[s.source.volume], + s.source.subdir, + ) + for s in config.syncs.values() + } + for vol, subdir in unique_sources.values(): + seed_volume( + vol, + subdir, + big_file_size_bytes=size_bytes, + remote_exec=remote_exec, + ) + + +def seed_volume( + vol: LocalVolume | RemoteVolume, + subdir: str | None = None, + *, + big_file_size_bytes: int = 0, + remote_exec: Callable[[str], None] | None = None, +) -> None: + """Write sample files into a single source volume.""" + match vol: + case LocalVolume(): + base = Path(vol.path) + path = base / subdir if subdir else base + path.mkdir(parents=True, exist_ok=True) + for name, content in _SAMPLE_FILES: + (path / name).write_text(content) + if big_file_size_bytes: + _write_zeroed_file( + path / "large-file.bin", + big_file_size_bytes, + ) + case RemoteVolume(): + if remote_exec is None: + return + rp = vol.path + if subdir: + rp = f"{rp}/{subdir}" + remote_exec(f"mkdir -p {rp}") + for name, content in _SAMPLE_FILES: + remote_exec( + f"printf %s {shlex.quote(content)}" f" > {rp}/{name}" + ) + if big_file_size_bytes: + remote_exec( + f"truncate -s {big_file_size_bytes}" + f" {rp}/large-file.bin" + ) diff --git a/nbkp/testkit/gen/sync.py b/nbkp/testkit/gen/sync.py new file mode 100644 index 0000000..9abea3f --- /dev/null +++ b/nbkp/testkit/gen/sync.py @@ -0,0 +1,113 @@ +"""Fake sync and prune result builders for manual testing.""" + +from __future__ import annotations + +from ...config import Config +from ...sync import PruneResult, SyncResult + + +def _snap_base(config: Config) -> str: + vol = config.volumes[config.syncs["photos-to-usb"].destination.volume] + return 
f"{vol.path}/snapshots" + + +def run_results(config: Config) -> list[SyncResult]: + """Sync results: success, success+snapshot, failure.""" + snap_base = _snap_base(config) + snap = f"{snap_base}/2026-02-19T10:30:00.000Z" + src_vol = config.volumes[config.syncs["docs-to-nas"].source.volume] + src_subdir = config.syncs["docs-to-nas"].source.subdir + return [ + SyncResult( + sync_slug="music-to-usb", + success=True, + dry_run=False, + rsync_exit_code=0, + output="", + ), + SyncResult( + sync_slug="photos-to-usb", + success=True, + dry_run=False, + rsync_exit_code=0, + output="", + snapshot_path=snap, + pruned_paths=[ + f"{snap_base}/2026-02-01T08:00:00.000Z", + f"{snap_base}/2026-02-10T12:00:00.000Z", + ], + ), + SyncResult( + sync_slug="docs-to-nas", + success=False, + dry_run=False, + rsync_exit_code=23, + output=( + "rsync: [sender] link_stat" + f' "{src_vol.path}/{src_subdir}" failed:' + " No such file or directory (2)\n" + "rsync error: some files/attrs" + " were not transferred (code 23)\n" + ), + error="rsync exited with code 23", + ), + ] + + +def dry_run_result(config: Config) -> SyncResult: + """Single dry-run success result.""" + return SyncResult( + sync_slug="photos-to-usb", + success=True, + dry_run=True, + rsync_exit_code=0, + output="", + ) + + +def prune_results(config: Config) -> list[PruneResult]: + """Prune results: success, noop, error.""" + snap_base = _snap_base(config) + return [ + PruneResult( + sync_slug="photos-to-usb", + deleted=[ + f"{snap_base}/2026-01-01T00:00:00.000Z", + f"{snap_base}/2026-01-15T00:00:00.000Z", + f"{snap_base}/2026-02-01T00:00:00.000Z", + ], + kept=7, + dry_run=False, + ), + PruneResult( + sync_slug="music-to-usb", + deleted=[], + kept=5, + dry_run=False, + ), + PruneResult( + sync_slug="docs-to-nas", + deleted=[], + kept=0, + dry_run=False, + error="btrfs delete failed:" " Permission denied", + ), + ] + + +def prune_dry_run_results( + config: Config, +) -> list[PruneResult]: + """Prune dry-run results.""" + snap_base 
= _snap_base(config) + return [ + PruneResult( + sync_slug="photos-to-usb", + deleted=[ + f"{snap_base}/2026-01-01T00:00:00.000Z", + f"{snap_base}/2026-01-15T00:00:00.000Z", + ], + kept=10, + dry_run=True, + ), + ] diff --git a/poetry.lock b/poetry.lock index 3a26e9e..3b225fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand. [[package]] name = "annotated-doc" @@ -12,6 +12,95 @@ files = [ {file = "annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4"}, ] +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "bcrypt" +version = "5.0.0" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "bcrypt-5.0.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f3c08197f3039bec79cee59a606d62b96b16669cff3949f21e74796b6e3cd2be"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:200af71bc25f22006f4069060c88ed36f8aa4ff7f53e67ff04d2ab3f1e79a5b2"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:baade0a5657654c2984468efb7d6c110db87ea63ef5a4b54732e7e337253e44f"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = 
"sha256:c58b56cdfb03202b3bcc9fd8daee8e8e9b6d7e3163aa97c631dfcfcc24d36c86"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4bfd2a34de661f34d0bda43c3e4e79df586e4716ef401fe31ea39d69d581ef23"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ed2e1365e31fc73f1825fa830f1c8f8917ca1b3ca6185773b349c20fd606cec2"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:83e787d7a84dbbfba6f250dd7a5efd689e935f03dd83b0f919d39349e1f23f83"}, + {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:137c5156524328a24b9fac1cb5db0ba618bc97d11970b39184c1d87dc4bf1746"}, + {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:38cac74101777a6a7d3b3e3cfefa57089b5ada650dce2baf0cbdd9d65db22a9e"}, + {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:d8d65b564ec849643d9f7ea05c6d9f0cd7ca23bdd4ac0c2dbef1104ab504543d"}, + {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:741449132f64b3524e95cd30e5cd3343006ce146088f074f31ab26b94e6c75ba"}, + {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:212139484ab3207b1f0c00633d3be92fef3c5f0af17cad155679d03ff2ee1e41"}, + {file = "bcrypt-5.0.0-cp313-cp313t-win32.whl", hash = "sha256:9d52ed507c2488eddd6a95bccee4e808d3234fa78dd370e24bac65a21212b861"}, + {file = "bcrypt-5.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f6984a24db30548fd39a44360532898c33528b74aedf81c26cf29c51ee47057e"}, + {file = "bcrypt-5.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9fffdb387abe6aa775af36ef16f55e318dcda4194ddbf82007a6f21da29de8f5"}, + {file = "bcrypt-5.0.0-cp314-cp314t-macosx_10_12_universal2.whl", hash = "sha256:4870a52610537037adb382444fefd3706d96d663ac44cbb2f37e3919dca3d7ef"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:48f753100931605686f74e27a7b49238122aa761a9aefe9373265b8b7aa43ea4"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f70aadb7a809305226daedf75d90379c397b094755a710d7014b8b117df1ebbf"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:744d3c6b164caa658adcb72cb8cc9ad9b4b75c7db507ab4bc2480474a51989da"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a28bc05039bdf3289d757f49d616ab3efe8cf40d8e8001ccdd621cd4f98f4fc9"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7f277a4b3390ab4bebe597800a90da0edae882c6196d3038a73adf446c4f969f"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:79cfa161eda8d2ddf29acad370356b47f02387153b11d46042e93a0a95127493"}, + {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a5393eae5722bcef046a990b84dff02b954904c36a194f6cfc817d7dca6c6f0b"}, + {file = "bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7f4c94dec1b5ab5d522750cb059bb9409ea8872d4494fd152b53cca99f1ddd8c"}, + {file = "bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0cae4cb350934dfd74c020525eeae0a5f79257e8a201c0c176f4b84fdbf2a4b4"}, + {file = "bcrypt-5.0.0-cp314-cp314t-win32.whl", hash = "sha256:b17366316c654e1ad0306a6858e189fc835eca39f7eb2cafd6aaca8ce0c40a2e"}, + {file = "bcrypt-5.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:92864f54fb48b4c718fc92a32825d0e42265a627f956bc0361fe869f1adc3e7d"}, + {file = "bcrypt-5.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dd19cf5184a90c873009244586396a6a884d591a5323f0e8a5922560718d4993"}, + {file = "bcrypt-5.0.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:fc746432b951e92b58317af8e0ca746efe93e66555f1b40888865ef5bf56446b"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:c2388ca94ffee269b6038d48747f4ce8df0ffbea43f31abfa18ac72f0218effb"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:560ddb6ec730386e7b3b26b8b4c88197aaed924430e7b74666a586ac997249ef"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d79e5c65dcc9af213594d6f7f1fa2c98ad3fc10431e7aa53c176b441943efbdd"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2b732e7d388fa22d48920baa267ba5d97cca38070b69c0e2d37087b381c681fd"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0c8e093ea2532601a6f686edbc2c6b2ec24131ff5c52f7610dd64fa4553b5464"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5b1589f4839a0899c146e8892efe320c0fa096568abd9b95593efac50a87cb75"}, + {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:89042e61b5e808b67daf24a434d89bab164d4de1746b37a8d173b6b14f3db9ff"}, + {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:e3cf5b2560c7b5a142286f69bde914494b6d8f901aaa71e453078388a50881c4"}, + {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f632fd56fc4e61564f78b46a2269153122db34988e78b6be8b32d28507b7eaeb"}, + {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:801cad5ccb6b87d1b430f183269b94c24f248dddbbc5c1f78b6ed231743e001c"}, + {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3cf67a804fc66fc217e6914a5635000259fbbbb12e78a99488e4d5ba445a71eb"}, + {file = "bcrypt-5.0.0-cp38-abi3-win32.whl", hash = "sha256:3abeb543874b2c0524ff40c57a4e14e5d3a66ff33fb423529c88f180fd756538"}, + {file = "bcrypt-5.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:35a77ec55b541e5e583eb3436ffbbf53b0ffa1fa16ca6782279daf95d146dcd9"}, + {file = "bcrypt-5.0.0-cp38-abi3-win_arm64.whl", hash = "sha256:cde08734f12c6a4e28dc6755cd11d3bdfea608d93d958fffbe95a7026ebe4980"}, + {file = 
"bcrypt-5.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0c418ca99fd47e9c59a301744d63328f17798b5947b0f791e9af3c1c499c2d0a"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddb4e1500f6efdd402218ffe34d040a1196c072e07929b9820f363a1fd1f4191"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7aeef54b60ceddb6f30ee3db090351ecf0d40ec6e2abf41430997407a46d2254"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f0ce778135f60799d89c9693b9b398819d15f1921ba15fe719acb3178215a7db"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a71f70ee269671460b37a449f5ff26982a6f2ba493b3eabdd687b4bf35f875ac"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8429e1c410b4073944f03bd778a9e066e7fad723564a52ff91841d278dfc822"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:edfcdcedd0d0f05850c52ba3127b1fce70b9f89e0fe5ff16517df7e81fa3cbb8"}, + {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:611f0a17aa4a25a69362dcc299fda5c8a3d4f160e2abb3831041feb77393a14a"}, + {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:db99dca3b1fdc3db87d7c57eac0c82281242d1eabf19dcb8a6b10eb29a2e72d1"}, + {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:5feebf85a9cefda32966d8171f5db7e3ba964b77fdfe31919622256f80f9cf42"}, + {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3ca8a166b1140436e058298a34d88032ab62f15aae1c598580333dc21d27ef10"}, + {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:61afc381250c3182d9078551e3ac3a41da14154fbff647ddf52a769f588c4172"}, + {file = "bcrypt-5.0.0-cp39-abi3-win32.whl", hash = "sha256:64d7ce196203e468c457c37ec22390f1a61c85c6f0b8160fd752940ccfb3a683"}, + {file = "bcrypt-5.0.0-cp39-abi3-win_amd64.whl", hash = 
"sha256:64ee8434b0da054d830fa8e89e1c8bf30061d539044a39524ff7dec90481e5c2"}, + {file = "bcrypt-5.0.0-cp39-abi3-win_arm64.whl", hash = "sha256:f2347d3534e76bf50bca5500989d6c1d05ed64b440408057a37673282c654927"}, + {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7edda91d5ab52b15636d9c30da87d2cc84f426c72b9dba7a9b4fe142ba11f534"}, + {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:046ad6db88edb3c5ece4369af997938fb1c19d6a699b9c1b27b0db432faae4c4"}, + {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dcd58e2b3a908b5ecc9b9df2f0085592506ac2d5110786018ee5e160f28e0911"}, + {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:6b8f520b61e8781efee73cba14e3e8c9556ccfb375623f4f97429544734545b4"}, + {file = "bcrypt-5.0.0.tar.gz", hash = "sha256:f748f7c2d6fd375cc93d3fba7ef4a9e3a092421b8dbf34d8d4dc06be9492dfdd"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + [[package]] name = "black" version = "26.1.0" @@ -63,13 +152,25 @@ d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "certifi" +version = "2026.1.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, +] + [[package]] name = "cffi" version = "2.0.0" description = "Foreign Function Interface for Python calling C code." 
optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "dev"] markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, @@ -161,16 +262,139 @@ files = [ [package.dependencies] pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} +[[package]] +name = "charset-normalizer" +version = "3.4.4" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"}, + {file = 
"charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"}, + {file = 
"charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = 
"sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = 
"sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = 
"sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"}, + {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"}, + {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, +] + [[package]] name = "click" -version = "8.2.1" +version = "8.3.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" groups = ["main", "dev"] files = [ - {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, - {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, + {file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"}, + {file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"}, ] [package.dependencies] @@ -195,7 +419,7 @@ version = "46.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"}, {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"}, @@ -249,7 +473,7 @@ files = [ ] [package.dependencies] -cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9\" and platform_python_implementation != \"PyPy\""} +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] @@ -261,6 +485,80 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] +[[package]] +name = "decorator" +version = "5.2.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, +] + +[[package]] +name = "deprecated" +version = "1.3.1" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] +files = [ + {file = "deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f"}, + {file = "deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223"}, +] + +[package.dependencies] +wrapt = ">=1.10,<3" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] + +[[package]] +name = "docker" +version = "7.1.0" +description = "A Python library for the Docker Engine API." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, +] + +[package.dependencies] +pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} +requests = ">=2.26.0" +urllib3 = ">=1.26.0" + +[package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] +ssh = ["paramiko (>=2.4.3)"] +websockets = ["websocket-client (>=1.3.0)"] + +[[package]] +name = "fabric" +version = "3.2.2" +description = "High level SSH command execution" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "fabric-3.2.2-py3-none-any.whl", hash = "sha256:91c47c0be68b14936c88b34da8a1f55e5710fd28397dac5d4ff2e21558113a6f"}, + {file = "fabric-3.2.2.tar.gz", hash = "sha256:8783ca42e3b0076f08b26901aac6b9d9b1f19c410074e7accfab902c184ff4a3"}, +] + +[package.dependencies] +decorator = ">=5" +deprecated = ">=1.2" +invoke = ">=2.0" +paramiko = ">=2.4" + +[package.extras] +pytest = ["pytest (>=7)"] + [[package]] name = "flake8" version = "7.3.0" @@ 
-278,129 +576,174 @@ mccabe = ">=0.7.0,<0.8.0" pycodestyle = ">=2.14.0,<2.15.0" pyflakes = ">=3.4.0,<3.5.0" +[[package]] +name = "idna" +version = "3.11" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" -version = "2.1.0" +version = "2.3.0" description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, - {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] +[[package]] +name = "invoke" +version = "2.2.1" +description = "Pythonic task execution" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8"}, + {file = "invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707"}, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + [[package]] name = "librt" -version = "0.8.0" +version = "0.8.1" description = "Mypyc runtime library" optional = false python-versions = ">=3.9" groups = ["dev"] markers = "platform_python_implementation != \"PyPy\"" files = [ - {file = "librt-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db63cf3586a24241e89ca1ce0b56baaec9d371a328bd186c529b27c914c9a1ef"}, - {file = "librt-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ba9d9e60651615bc614be5e21a82cdb7b1769a029369cf4b4d861e4f19686fb6"}, - {file = "librt-0.8.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cb4b3ad543084ed79f186741470b251b9d269cd8b03556f15a8d1a99a64b7de5"}, - {file = "librt-0.8.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d2720335020219197380ccfa5c895f079ac364b4c429e96952cd6509934d8eb"}, - {file = "librt-0.8.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9726305d3e53419d27fc8cdfcd3f9571f0ceae22fa6b5ea1b3662c2e538f833e"}, - {file = "librt-0.8.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cc3d107f603b5ee7a79b6aa6f166551b99b32fb4a5303c4dfcb4222fc6a0335e"}, - {file = "librt-0.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41064a0c07b4cc7a81355ccc305cb097d6027002209ffca51306e65ee8293630"}, - {file = "librt-0.8.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c6e4c10761ddbc0d67d2f6e2753daf99908db85d8b901729bf2bf5eaa60e0567"}, - {file = 
"librt-0.8.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:ba581acad5ac8f33e2ff1746e8a57e001b47c6721873121bf8bbcf7ba8bd3aa4"}, - {file = "librt-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bdab762e2c0b48bab76f1a08acb3f4c77afd2123bedac59446aeaaeed3d086cf"}, - {file = "librt-0.8.0-cp310-cp310-win32.whl", hash = "sha256:6a3146c63220d814c4a2c7d6a1eacc8d5c14aed0ff85115c1dfea868080cd18f"}, - {file = "librt-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:bbebd2bba5c6ae02907df49150e55870fdd7440d727b6192c46b6f754723dde9"}, - {file = "librt-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ce33a9778e294507f3a0e3468eccb6a698b5166df7db85661543eca1cfc5369"}, - {file = "librt-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8070aa3368559de81061ef752770d03ca1f5fc9467d4d512d405bd0483bfffe6"}, - {file = "librt-0.8.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:20f73d4fecba969efc15cdefd030e382502d56bb6f1fc66b580cce582836c9fa"}, - {file = "librt-0.8.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a512c88900bdb1d448882f5623a0b1ad27ba81a9bd75dacfe17080b72272ca1f"}, - {file = "librt-0.8.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:015e2dde6e096d27c10238bf9f6492ba6c65822dfb69d2bf74c41a8e88b7ddef"}, - {file = "librt-0.8.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1c25a131013eadd3c600686a0c0333eb2896483cbc7f65baa6a7ee761017aef9"}, - {file = "librt-0.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:21b14464bee0b604d80a638cf1ee3148d84ca4cc163dcdcecb46060c1b3605e4"}, - {file = "librt-0.8.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:05a3dd3f116747f7e1a2b475ccdc6fb637fd4987126d109e03013a79d40bf9e6"}, - {file = "librt-0.8.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:fa37f99bff354ff191c6bcdffbc9d7cdd4fc37faccfc9be0ef3a4fd5613977da"}, - 
{file = "librt-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1566dbb9d1eb0987264c9b9460d212e809ba908d2f4a3999383a84d765f2f3f1"}, - {file = "librt-0.8.0-cp311-cp311-win32.whl", hash = "sha256:70defb797c4d5402166787a6b3c66dfb3fa7f93d118c0509ffafa35a392f4258"}, - {file = "librt-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:db953b675079884ffda33d1dca7189fb961b6d372153750beb81880384300817"}, - {file = "librt-0.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:75d1a8cab20b2043f03f7aab730551e9e440adc034d776f15f6f8d582b0a5ad4"}, - {file = "librt-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:17269dd2745dbe8e42475acb28e419ad92dfa38214224b1b01020b8cac70b645"}, - {file = "librt-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f4617cef654fca552f00ce5ffdf4f4b68770f18950e4246ce94629b789b92467"}, - {file = "librt-0.8.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5cb11061a736a9db45e3c1293cfcb1e3caf205912dfa085734ba750f2197ff9a"}, - {file = "librt-0.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4bb00bd71b448f16749909b08a0ff16f58b079e2261c2e1000f2bbb2a4f0a45"}, - {file = "librt-0.8.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95a719a049f0eefaf1952673223cf00d442952273cbd20cf2ed7ec423a0ef58d"}, - {file = "librt-0.8.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bd32add59b58fba3439d48d6f36ac695830388e3da3e92e4fc26d2d02670d19c"}, - {file = "librt-0.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4f764b2424cb04524ff7a486b9c391e93f93dc1bd8305b2136d25e582e99aa2f"}, - {file = "librt-0.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f04ca50e847abc486fa8f4107250566441e693779a5374ba211e96e238f298b9"}, - {file = "librt-0.8.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9ab3a3475a55b89b87ffd7e6665838e8458e0b596c22e0177e0f961434ec474a"}, - {file 
= "librt-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e36a8da17134ffc29373775d88c04832f9ecfab1880470661813e6c7991ef79"}, - {file = "librt-0.8.0-cp312-cp312-win32.whl", hash = "sha256:4eb5e06ebcc668677ed6389164f52f13f71737fc8be471101fa8b4ce77baeb0c"}, - {file = "librt-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:0a33335eb59921e77c9acc05d0e654e4e32e45b014a4d61517897c11591094f8"}, - {file = "librt-0.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:24a01c13a2a9bdad20997a4443ebe6e329df063d1978bbe2ebbf637878a46d1e"}, - {file = "librt-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7f820210e21e3a8bf8fde2ae3c3d10106d4de9ead28cbfdf6d0f0f41f5b12fa1"}, - {file = "librt-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4831c44b8919e75ca0dfb52052897c1ef59fdae19d3589893fbd068f1e41afbf"}, - {file = "librt-0.8.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:88c6e75540f1f10f5e0fc5e87b4b6c290f0e90d1db8c6734f670840494764af8"}, - {file = "librt-0.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9646178cd794704d722306c2c920c221abbf080fede3ba539d5afdec16c46dad"}, - {file = "librt-0.8.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e1af31a710e17891d9adf0dbd9a5fcd94901a3922a96499abdbf7ce658f4e01"}, - {file = "librt-0.8.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:507e94f4bec00b2f590fbe55f48cd518a208e2474a3b90a60aa8f29136ddbada"}, - {file = "librt-0.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f1178e0de0c271231a660fbef9be6acdfa1d596803464706862bef6644cc1cae"}, - {file = "librt-0.8.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:71fc517efc14f75c2f74b1f0a5d5eb4a8e06aa135c34d18eaf3522f4a53cd62d"}, - {file = "librt-0.8.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:0583aef7e9a720dd40f26a2ad5a1bf2ccbb90059dac2b32ac516df232c701db3"}, - {file = 
"librt-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5d0f76fc73480d42285c609c0ea74d79856c160fa828ff9aceab574ea4ecfd7b"}, - {file = "librt-0.8.0-cp313-cp313-win32.whl", hash = "sha256:e79dbc8f57de360f0ed987dc7de7be814b4803ef0e8fc6d3ff86e16798c99935"}, - {file = "librt-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:25b3e667cbfc9000c4740b282df599ebd91dbdcc1aa6785050e4c1d6be5329ab"}, - {file = "librt-0.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:e9a3a38eb4134ad33122a6d575e6324831f930a771d951a15ce232e0237412c2"}, - {file = "librt-0.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:421765e8c6b18e64d21c8ead315708a56fc24f44075059702e421d164575fdda"}, - {file = "librt-0.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:48f84830a8f8ad7918afd743fd7c4eb558728bceab7b0e38fd5a5cf78206a556"}, - {file = "librt-0.8.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9f09d4884f882baa39a7e36bbf3eae124c4ca2a223efb91e567381d1c55c6b06"}, - {file = "librt-0.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:693697133c3b32aa9b27f040e3691be210e9ac4d905061859a9ed519b1d5a376"}, - {file = "librt-0.8.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5512aae4648152abaf4d48b59890503fcbe86e85abc12fb9b096fe948bdd816"}, - {file = "librt-0.8.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:995d24caa6bbb34bcdd4a41df98ac6d1af637cfa8975cb0790e47d6623e70e3e"}, - {file = "librt-0.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b9aef96d7593584e31ef6ac1eb9775355b0099fee7651fae3a15bc8657b67b52"}, - {file = "librt-0.8.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:4f6e975377fbc4c9567cb33ea9ab826031b6c7ec0515bfae66a4fb110d40d6da"}, - {file = "librt-0.8.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:daae5e955764be8fd70a93e9e5133c75297f8bce1e802e1d3683b98f77e1c5ab"}, - {file = 
"librt-0.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7bd68cebf3131bb920d5984f75fe302d758db33264e44b45ad139385662d7bc3"}, - {file = "librt-0.8.0-cp314-cp314-win32.whl", hash = "sha256:1e6811cac1dcb27ca4c74e0ca4a5917a8e06db0d8408d30daee3a41724bfde7a"}, - {file = "librt-0.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:178707cda89d910c3b28bf5aa5f69d3d4734e0f6ae102f753ad79edef83a83c7"}, - {file = "librt-0.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:3e8b77b5f54d0937b26512774916041756c9eb3e66f1031971e626eea49d0bf4"}, - {file = "librt-0.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:789911e8fa40a2e82f41120c936b1965f3213c67f5a483fc5a41f5839a05dcbb"}, - {file = "librt-0.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2b37437e7e4ef5e15a297b36ba9e577f73e29564131d86dd75875705e97402b5"}, - {file = "librt-0.8.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:671a6152edf3b924d98a5ed5e6982ec9cb30894085482acadce0975f031d4c5c"}, - {file = "librt-0.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8992ca186a1678107b0af3d0c9303d8c7305981b9914989b9788319ed4d89546"}, - {file = "librt-0.8.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:001e5330093d887b8b9165823eca6c5c4db183fe4edea4fdc0680bbac5f46944"}, - {file = "librt-0.8.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d920789eca7ef71df7f31fd547ec0d3002e04d77f30ba6881e08a630e7b2c30e"}, - {file = "librt-0.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:82fb4602d1b3e303a58bfe6165992b5a78d823ec646445356c332cd5f5bbaa61"}, - {file = "librt-0.8.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:4d3e38797eb482485b486898f89415a6ab163bc291476bd95712e42cf4383c05"}, - {file = "librt-0.8.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:a905091a13e0884701226860836d0386b88c72ce5c2fdfba6618e14c72be9f25"}, - 
{file = "librt-0.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:375eda7acfce1f15f5ed56cfc960669eefa1ec8732e3e9087c3c4c3f2066759c"}, - {file = "librt-0.8.0-cp314-cp314t-win32.whl", hash = "sha256:2ccdd20d9a72c562ffb73098ac411de351b53a6fbb3390903b2d33078ef90447"}, - {file = "librt-0.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:25e82d920d4d62ad741592fcf8d0f3bda0e3fc388a184cb7d2f566c681c5f7b9"}, - {file = "librt-0.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:92249938ab744a5890580d3cb2b22042f0dce71cdaa7c1369823df62bedf7cbc"}, - {file = "librt-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4b705f85311ee76acec5ee70806990a51f0deb519ea0c29c1d1652d79127604d"}, - {file = "librt-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7ce0a8cb67e702dcb06342b2aaaa3da9fb0ddc670417879adfa088b44cf7b3b6"}, - {file = "librt-0.8.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:aaadec87f45a3612b6818d1db5fbfe93630669b7ee5d6bdb6427ae08a1aa2141"}, - {file = "librt-0.8.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56901f1eec031396f230db71c59a01d450715cbbef9856bf636726994331195d"}, - {file = "librt-0.8.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b055bb3abaf69abed25743d8fc1ab691e4f51a912ee0a6f9a6c84f4bbddb283d"}, - {file = "librt-0.8.0-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ef3bd856373cf8e7382402731f43bfe978a8613b4039e49e166e1e0dc590216"}, - {file = "librt-0.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e0ffe88ebb5962f8fb0ddcbaaff30f1ea06a79501069310e1e030eafb1ad787"}, - {file = "librt-0.8.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82e61cd1c563745ad495387c3b65806bfd453badb4adbc019df3389dddee1bf6"}, - {file = "librt-0.8.0-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:667e2513cf69bfd1e1ed9a00d6c736d5108714ec071192afb737987955888a25"}, - {file = 
"librt-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b6caff69e25d80c269b1952be8493b4d94ef745f438fa619d7931066bdd26de"}, - {file = "librt-0.8.0-cp39-cp39-win32.whl", hash = "sha256:02a9fe85410cc9bef045e7cb7fd26fdde6669e6d173f99df659aa7f6335961e9"}, - {file = "librt-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:de076eaba208d16efb5962f99539867f8e2c73480988cb513fcf1b5dbb0c9dcf"}, - {file = "librt-0.8.0.tar.gz", hash = "sha256:cb74cdcbc0103fc988e04e5c58b0b31e8e5dd2babb9182b6f9490488eb36324b"}, + {file = "librt-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81fd938344fecb9373ba1b155968c8a329491d2ce38e7ddb76f30ffb938f12dc"}, + {file = "librt-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5db05697c82b3a2ec53f6e72b2ed373132b0c2e05135f0696784e97d7f5d48e7"}, + {file = "librt-0.8.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d56bc4011975f7460bea7b33e1ff425d2f1adf419935ff6707273c77f8a4ada6"}, + {file = "librt-0.8.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdc0f588ff4b663ea96c26d2a230c525c6fc62b28314edaaaca8ed5af931ad0"}, + {file = "librt-0.8.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:97c2b54ff6717a7a563b72627990bec60d8029df17df423f0ed37d56a17a176b"}, + {file = "librt-0.8.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8f1125e6bbf2f1657d9a2f3ccc4a2c9b0c8b176965bb565dd4d86be67eddb4b6"}, + {file = "librt-0.8.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8f4bb453f408137d7581be309b2fbc6868a80e7ef60c88e689078ee3a296ae71"}, + {file = "librt-0.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c336d61d2fe74a3195edc1646d53ff1cddd3a9600b09fa6ab75e5514ba4862a7"}, + {file = "librt-0.8.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:eb5656019db7c4deacf0c1a55a898c5bb8f989be904597fcb5232a2f4828fa05"}, + {file = 
"librt-0.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c25d9e338d5bed46c1632f851babf3d13c78f49a225462017cf5e11e845c5891"}, + {file = "librt-0.8.1-cp310-cp310-win32.whl", hash = "sha256:aaab0e307e344cb28d800957ef3ec16605146ef0e59e059a60a176d19543d1b7"}, + {file = "librt-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:56e04c14b696300d47b3bc5f1d10a00e86ae978886d0cee14e5714fafb5df5d2"}, + {file = "librt-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:681dc2451d6d846794a828c16c22dc452d924e9f700a485b7ecb887a30aad1fd"}, + {file = "librt-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3b4350b13cc0e6f5bec8fa7caf29a8fb8cdc051a3bae45cfbfd7ce64f009965"}, + {file = "librt-0.8.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ac1e7817fd0ed3d14fd7c5df91daed84c48e4c2a11ee99c0547f9f62fdae13da"}, + {file = "librt-0.8.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:747328be0c5b7075cde86a0e09d7a9196029800ba75a1689332348e998fb85c0"}, + {file = "librt-0.8.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f0af2bd2bc204fa27f3d6711d0f360e6b8c684a035206257a81673ab924aa11e"}, + {file = "librt-0.8.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d480de377f5b687b6b1bc0c0407426da556e2a757633cc7e4d2e1a057aa688f3"}, + {file = "librt-0.8.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d0ee06b5b5291f609ddb37b9750985b27bc567791bc87c76a569b3feed8481ac"}, + {file = "librt-0.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e2c6f77b9ad48ce5603b83b7da9ee3e36b3ab425353f695cba13200c5d96596"}, + {file = "librt-0.8.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:439352ba9373f11cb8e1933da194dcc6206daf779ff8df0ed69c5e39113e6a99"}, + {file = "librt-0.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:82210adabbc331dbb65d7868b105185464ef13f56f7f76688565ad79f648b0fe"}, + 
{file = "librt-0.8.1-cp311-cp311-win32.whl", hash = "sha256:52c224e14614b750c0a6d97368e16804a98c684657c7518752c356834fff83bb"}, + {file = "librt-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:c00e5c884f528c9932d278d5c9cbbea38a6b81eb62c02e06ae53751a83a4d52b"}, + {file = "librt-0.8.1-cp311-cp311-win_arm64.whl", hash = "sha256:f7cdf7f26c2286ffb02e46d7bac56c94655540b26347673bea15fa52a6af17e9"}, + {file = "librt-0.8.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a28f2612ab566b17f3698b0da021ff9960610301607c9a5e8eaca62f5e1c350a"}, + {file = "librt-0.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:60a78b694c9aee2a0f1aaeaa7d101cf713e92e8423a941d2897f4fa37908dab9"}, + {file = "librt-0.8.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:758509ea3f1eba2a57558e7e98f4659d0ea7670bff49673b0dde18a3c7e6c0eb"}, + {file = "librt-0.8.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:039b9f2c506bd0ab0f8725aa5ba339c6f0cd19d3b514b50d134789809c24285d"}, + {file = "librt-0.8.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bb54f1205a3a6ab41a6fd71dfcdcbd278670d3a90ca502a30d9da583105b6f7"}, + {file = "librt-0.8.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:05bd41cdee35b0c59c259f870f6da532a2c5ca57db95b5f23689fcb5c9e42440"}, + {file = "librt-0.8.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adfab487facf03f0d0857b8710cf82d0704a309d8ffc33b03d9302b4c64e91a9"}, + {file = "librt-0.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:153188fe98a72f206042be10a2c6026139852805215ed9539186312d50a8e972"}, + {file = "librt-0.8.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dd3c41254ee98604b08bd5b3af5bf0a89740d4ee0711de95b65166bf44091921"}, + {file = "librt-0.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e0d138c7ae532908cbb342162b2611dbd4d90c941cd25ab82084aaf71d2c0bd0"}, + {file 
= "librt-0.8.1-cp312-cp312-win32.whl", hash = "sha256:43353b943613c5d9c49a25aaffdba46f888ec354e71e3529a00cca3f04d66a7a"}, + {file = "librt-0.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff8baf1f8d3f4b6b7257fcb75a501f2a5499d0dda57645baa09d4d0d34b19444"}, + {file = "librt-0.8.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f2ae3725904f7377e11cc37722d5d401e8b3d5851fb9273d7f4fe04f6b3d37d"}, + {file = "librt-0.8.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e6bad1cd94f6764e1e21950542f818a09316645337fd5ab9a7acc45d99a8f35"}, + {file = "librt-0.8.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf450f498c30af55551ba4f66b9123b7185362ec8b625a773b3d39aa1a717583"}, + {file = "librt-0.8.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eca45e982fa074090057132e30585a7e8674e9e885d402eae85633e9f449ce6c"}, + {file = "librt-0.8.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c3811485fccfda840861905b8c70bba5ec094e02825598bb9d4ca3936857a04"}, + {file = "librt-0.8.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e4af413908f77294605e28cfd98063f54b2c790561383971d2f52d113d9c363"}, + {file = "librt-0.8.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5212a5bd7fae98dae95710032902edcd2ec4dc994e883294f75c857b83f9aba0"}, + {file = "librt-0.8.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e692aa2d1d604e6ca12d35e51fdc36f4cda6345e28e36374579f7ef3611b3012"}, + {file = "librt-0.8.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4be2a5c926b9770c9e08e717f05737a269b9d0ebc5d2f0060f0fe3fe9ce47acb"}, + {file = "librt-0.8.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fd1a720332ea335ceb544cf0a03f81df92abd4bb887679fd1e460976b0e6214b"}, + {file = "librt-0.8.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2af9e01e0ef80d95ae3c720be101227edae5f2fe7e3dc63d8857fadfc5a1d"}, + {file = 
"librt-0.8.1-cp313-cp313-win32.whl", hash = "sha256:086a32dbb71336627e78cc1d6ee305a68d038ef7d4c39aaff41ae8c9aa46e91a"}, + {file = "librt-0.8.1-cp313-cp313-win_amd64.whl", hash = "sha256:e11769a1dbda4da7b00a76cfffa67aa47cfa66921d2724539eee4b9ede780b79"}, + {file = "librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0"}, + {file = "librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f"}, + {file = "librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c"}, + {file = "librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc"}, + {file = "librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c"}, + {file = "librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3"}, + {file = "librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14"}, + {file = "librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7"}, + {file = "librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6"}, + {file = "librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071"}, + {file = "librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78"}, + {file = 
"librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023"}, + {file = "librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730"}, + {file = "librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3"}, + {file = "librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1"}, + {file = "librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee"}, + {file = "librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7"}, + {file = "librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040"}, + {file = "librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e"}, + {file = "librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732"}, + {file = "librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624"}, + {file = "librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4"}, + {file = "librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382"}, + {file = "librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994"}, + 
{file = "librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a"}, + {file = "librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4"}, + {file = "librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = "sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61"}, + {file = "librt-0.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3dff3d3ca8db20e783b1bc7de49c0a2ab0b8387f31236d6a026597d07fcd68ac"}, + {file = "librt-0.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08eec3a1fc435f0d09c87b6bf1ec798986a3544f446b864e4099633a56fcd9ed"}, + {file = "librt-0.8.1-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e3f0a41487fd5fad7e760b9e8a90e251e27c2816fbc2cff36a22a0e6bcbbd9dd"}, + {file = "librt-0.8.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bacdb58d9939d95cc557b4dbaa86527c9db2ac1ed76a18bc8d26f6dc8647d851"}, + {file = "librt-0.8.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b6d7ab1f01aa753188605b09a51faa44a3327400b00b8cce424c71910fc0a128"}, + {file = "librt-0.8.1-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4998009e7cb9e896569f4be7004f09d0ed70d386fa99d42b6d363f6d200501ac"}, + {file = "librt-0.8.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2cc68eeeef5e906839c7bb0815748b5b0a974ec27125beefc0f942715785b551"}, + {file = "librt-0.8.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0bf69d79a23f4f40b8673a947a234baeeb133b5078b483b7297c5916539cf5d5"}, + {file = "librt-0.8.1-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:22b46eabd76c1986ee7d231b0765ad387d7673bbd996aa0d0d054b38ac65d8f6"}, + {file = "librt-0.8.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:237796479f4d0637d6b9cbcb926ff424a97735e68ade6facf402df4ec93375ed"}, + {file = 
"librt-0.8.1-cp39-cp39-win32.whl", hash = "sha256:4beb04b8c66c6ae62f8c1e0b2f097c1ebad9295c929a8d5286c05eae7c2fc7dc"}, + {file = "librt-0.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:64548cde61b692dc0dc379f4b5f59a2f582c2ebe7890d09c1ae3b9e66fa015b7"}, + {file = "librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73"}, ] [[package]] name = "markdown-it-py" -version = "3.0.0" +version = "4.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, + {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"}, + {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"}, ] [package.dependencies] @@ -408,13 +751,111 @@ mdurl = ">=0.1,<1.0" [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "markdown-it-pyrs", "mistletoe (>=1.0,<2.0)", "mistune (>=3.0,<4.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] +plugins = ["mdit-py-plugins (>=0.5.0)"] profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] +rtd = ["ipykernel", "jupyter_sphinx", 
"mdit-py-plugins (>=0.5.0)", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme (>=1.0,<2.0)", "sphinx-copybutton", "sphinx-design"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions", "requests"] + +[[package]] +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = 
"markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = 
"markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = 
"sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, +] [[package]] name = "mccabe" @@ -515,16 +956,37 @@ files = [ [[package]] name = "packaging" -version = "25.0" +version = "26.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, - {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, + {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"}, + {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, ] +[[package]] +name = "paramiko" +version = "4.0.0" +description = "SSH2 protocol library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "paramiko-4.0.0-py3-none-any.whl", hash = "sha256:0e20e00ac666503bf0b4eda3b6d833465a2b7aff2e2b3d79a8bba5ef144ee3b9"}, + {file = "paramiko-4.0.0.tar.gz", hash = "sha256:6a25f07b380cc9c9a88d2b920ad37167ac4667f8d9886ccebd8f90f654b5d69f"}, +] + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +invoke = ">=2.0" 
+pynacl = ">=1.5" + +[package.extras] +gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] + [[package]] name = "pathspec" version = "1.0.4" @@ -545,21 +1007,16 @@ tests = ["pytest (>=9)", "typing-extensions (>=4.15)"] [[package]] name = "platformdirs" -version = "4.3.8" +version = "4.9.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, - {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, + {file = "platformdirs-4.9.2-py3-none-any.whl", hash = "sha256:9170634f126f8efdae22fb58ae8a0eaa86f38365bc57897a6c4f781d1f5875bd"}, + {file = "platformdirs-4.9.2.tar.gz", hash = "sha256:9a33809944b9db043ad67ca0db94b14bf452cc6aeaac46a88ea55b26e2e9d291"}, ] -[package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.14.1)"] - [[package]] name = "pluggy" version = "1.6.0" @@ -590,17 +1047,173 @@ files = [ [[package]] name = "pycparser" -version = "2.22" +version = "3.0" description = "C parser in Python" optional = false -python-versions = ">=3.8" -groups = ["main"] +python-versions = ">=3.10" +groups = ["main", "dev"] markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = 
"sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, + {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"}, + {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, ] +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.41.5" +typing-extensions = ">=4.14.1" +typing-inspection = ">=0.4.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = 
"pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = 
"pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = 
"sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = 
"sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = 
"sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = 
"pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, +] + +[package.dependencies] +typing-extensions = ">=4.14.1" + [[package]] name = "pyflakes" version = "3.4.0" @@ -628,6 +1241,48 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pynacl" +version = "1.6.2" +description = "Python binding to the Networking and Cryptography (NaCl) library" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pynacl-1.6.2-cp314-cp314t-macosx_10_10_universal2.whl", hash = "sha256:622d7b07cc5c02c666795792931b50c91f3ce3c2649762efb1ef0d5684c81594"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", 
hash = "sha256:d071c6a9a4c94d79eb665db4ce5cedc537faf74f2355e4d502591d850d3913c0"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe9847ca47d287af41e82be1dd5e23023d3c31a951da134121ab02e42ac218c9"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:04316d1fc625d860b6c162fff704eb8426b1a8bcd3abacea11142cbd99a6b574"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44081faff368d6c5553ccf55322ef2819abb40e25afaec7e740f159f74813634"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:a9f9932d8d2811ce1a8ffa79dcbdf3970e7355b5c8eb0c1a881a57e7f7d96e88"}, + {file = "pynacl-1.6.2-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:bc4a36b28dd72fb4845e5d8f9760610588a96d5a51f01d84d8c6ff9849968c14"}, + {file = "pynacl-1.6.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3bffb6d0f6becacb6526f8f42adfb5efb26337056ee0831fb9a7044d1a964444"}, + {file = "pynacl-1.6.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2fef529ef3ee487ad8113d287a593fa26f48ee3620d92ecc6f1d09ea38e0709b"}, + {file = "pynacl-1.6.2-cp314-cp314t-win32.whl", hash = "sha256:a84bf1c20339d06dc0c85d9aea9637a24f718f375d861b2668b2f9f96fa51145"}, + {file = "pynacl-1.6.2-cp314-cp314t-win_amd64.whl", hash = "sha256:320ef68a41c87547c91a8b58903c9caa641ab01e8512ce291085b5fe2fcb7590"}, + {file = "pynacl-1.6.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d29bfe37e20e015a7d8b23cfc8bd6aa7909c92a1b8f41ee416bbb3e79ef182b2"}, + {file = "pynacl-1.6.2-cp38-abi3-macosx_10_10_universal2.whl", hash = "sha256:c949ea47e4206af7c8f604b8278093b674f7c79ed0d4719cc836902bf4517465"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8845c0631c0be43abdd865511c41eab235e0be69c81dc66a50911594198679b0"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:22de65bb9010a725b0dac248f353bb072969c94fa8d6b1f34b87d7953cf7bbe4"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46065496ab748469cdd999246d17e301b2c24ae2fdf739132e580a0e94c94a87"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a66d6fb6ae7661c58995f9c6435bda2b1e68b54b598a6a10247bfcdadac996c"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:26bfcd00dcf2cf160f122186af731ae30ab120c18e8375684ec2670dccd28130"}, + {file = "pynacl-1.6.2-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c8a231e36ec2cab018c4ad4358c386e36eede0319a0c41fed24f840b1dac59f6"}, + {file = "pynacl-1.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68be3a09455743ff9505491220b64440ced8973fe930f270c8e07ccfa25b1f9e"}, + {file = "pynacl-1.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8b097553b380236d51ed11356c953bf8ce36a29a3e596e934ecabe76c985a577"}, + {file = "pynacl-1.6.2-cp38-abi3-win32.whl", hash = "sha256:5811c72b473b2f38f7e2a3dc4f8642e3a3e9b5e7317266e4ced1fba85cae41aa"}, + {file = "pynacl-1.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:62985f233210dee6548c223301b6c25440852e13d59a8b81490203c3227c5ba0"}, + {file = "pynacl-1.6.2-cp38-abi3-win_arm64.whl", hash = "sha256:834a43af110f743a754448463e8fd61259cd4ab5bbedcf70f9dabad1d28a394c"}, + {file = "pynacl-1.6.2.tar.gz", hash = "sha256:018494d6d696ae03c7e656e5e74cdfd8ea1326962cc401bcf018f1ed8436811c"}, +] + +[package.dependencies] +cffi = {version = ">=2.0.0", markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.9\""} + +[package.extras] +docs = ["sphinx (<7)", "sphinx_rtd_theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=7.4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] + [[package]] name = "pytest" version = "9.0.2" @@ -650,6 +1305,21 @@ pygments = ">=2.7.2" [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", 
"requests", "setuptools", "xmlschema"] +[[package]] +name = "python-dotenv" +version = "1.2.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61"}, + {file = "python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + [[package]] name = "pytokens" version = "0.4.1" @@ -705,16 +1375,152 @@ files = [ [package.extras] dev = ["black", "build", "mypy", "pytest", "pytest-cov", "setuptools", "tox", "twine", "wheel"] +[[package]] +name = "pywin32" +version = "311" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["dev"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"}, + {file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"}, + {file = "pywin32-311-cp310-cp310-win_arm64.whl", hash = "sha256:0502d1facf1fed4839a9a51ccbcc63d952cf318f78ffc00a7e78528ac27d7a2b"}, + {file = "pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151"}, + {file = "pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503"}, + {file = "pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2"}, + {file = "pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31"}, + {file = "pywin32-311-cp312-cp312-win_amd64.whl", hash = 
"sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067"}, + {file = "pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852"}, + {file = "pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d"}, + {file = "pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d"}, + {file = "pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a"}, + {file = "pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee"}, + {file = "pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87"}, + {file = "pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42"}, + {file = "pywin32-311-cp38-cp38-win32.whl", hash = "sha256:6c6f2969607b5023b0d9ce2541f8d2cbb01c4f46bc87456017cf63b73f1e2d8c"}, + {file = "pywin32-311-cp38-cp38-win_amd64.whl", hash = "sha256:c8015b09fb9a5e188f83b7b04de91ddca4658cee2ae6f3bc483f0b21a77ef6cd"}, + {file = "pywin32-311-cp39-cp39-win32.whl", hash = "sha256:aba8f82d551a942cb20d4a83413ccbac30790b50efb89a75e4f586ac0bb8056b"}, + {file = "pywin32-311-cp39-cp39-win_amd64.whl", hash = "sha256:e0c4cfb0621281fe40387df582097fd796e80430597cb9944f0ae70447bacd91"}, + {file = "pywin32-311-cp39-cp39-win_arm64.whl", hash = "sha256:62ea666235135fee79bb154e695f3ff67370afefd71bd7fea7512fc70ef31e3d"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, + {file = 
"PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, + {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, + {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, + {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"}, + {file = 
"pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"}, + {file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"}, + {file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"}, + {file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"}, + {file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"}, + {file = 
"pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"}, + {file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"}, + {file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"}, + {file = 
"pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"}, + {file = 
"pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"}, + {file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"}, + {file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"}, + {file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"}, +] + +[[package]] +name = "requests" +version = "2.32.5" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + [[package]] name = "rich" -version = "14.1.0" +version = "14.3.3" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" groups = ["main"] files = [ - {file = "rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f"}, - {file = "rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8"}, + {file = "rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d"}, + {file = "rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b"}, ] [package.dependencies] @@ -736,16 +1542,70 @@ files = [ {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, ] +[[package]] +name = "testcontainers" +version = "4.14.1" +description = "Python library for throwaway instances of anything that can run in a Docker container" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "testcontainers-4.14.1-py3-none-any.whl", hash = "sha256:03dfef4797b31c82e7b762a454b6afec61a2a512ad54af47ab41e4fa5415f891"}, + {file = "testcontainers-4.14.1.tar.gz", hash = "sha256:316f1bb178d829c003acd650233e3ff3c59a833a08d8661c074f58a4fbd42a64"}, +] + +[package.dependencies] 
+docker = "*" +python-dotenv = "*" +typing-extensions = "*" +urllib3 = "*" +wrapt = "*" + +[package.extras] +arangodb = ["python-arango (>=8,<9)"] +aws = ["boto3 (>=1,<2)", "httpx"] +azurite = ["azure-storage-blob (>=12,<13)"] +chroma = ["chromadb-client (>=1,<2)"] +cosmosdb = ["azure-cosmos (>=4,<5)"] +db2 = ["ibm_db_sa ; platform_machine != \"aarch64\" and platform_machine != \"arm64\"", "sqlalchemy (>=2,<3)"] +generic = ["httpx", "redis (>=7,<8)"] +google = ["google-cloud-datastore (>=2,<3)", "google-cloud-pubsub (>=2,<3)"] +influxdb = ["influxdb (>=5,<6)", "influxdb-client (>=1,<2)"] +k3s = ["kubernetes", "pyyaml (>=6.0.3)"] +keycloak = ["python-keycloak (>=6,<7) ; python_version < \"4.0\""] +localstack = ["boto3 (>=1,<2)"] +mailpit = ["cryptography"] +minio = ["minio (>=7,<8)"] +mongodb = ["pymongo (>=4,<5)"] +mssql = ["pymssql (>=2,<3)", "sqlalchemy (>=2,<3)"] +mysql = ["pymysql[rsa] (>=1,<2)", "sqlalchemy (>=2,<3)"] +nats = ["nats-py (>=2,<3)"] +neo4j = ["neo4j (>=6,<7)"] +openfga = ["openfga-sdk"] +opensearch = ["opensearch-py (>=3,<4) ; python_version < \"4.0\""] +oracle = ["oracledb (>=3,<4)", "sqlalchemy (>=2,<3)"] +oracle-free = ["oracledb (>=3,<4)", "sqlalchemy (>=2,<3)"] +qdrant = ["qdrant-client (>=1,<2)"] +rabbitmq = ["pika (>=1,<2)"] +redis = ["redis (>=7,<8)"] +registry = ["bcrypt (>=5,<6)"] +scylla = ["cassandra-driver (>=3,<4)"] +selenium = ["selenium (>=4,<5)"] +sftp = ["cryptography"] +test-module-import = ["httpx"] +trino = ["trino"] +weaviate = ["weaviate-client (>=4,<5)"] + [[package]] name = "typer" -version = "0.24.0" +version = "0.24.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "typer-0.24.0-py3-none-any.whl", hash = "sha256:5fc435a9c8356f6160ed6e85a6301fdd6e3d8b2851da502050d1f92c5e9eddc8"}, - {file = "typer-0.24.0.tar.gz", hash = "sha256:f9373dc4eff901350694f519f783c29b6d7a110fc0dcc11b1d7e353b85ca6504"}, + {file = "typer-0.24.1-py3-none-any.whl", hash = "sha256:112c1f0ce578bfb4cab9ffdabc68f031416ebcc216536611ba21f04e9aa84c9e"}, + {file = "typer-0.24.1.tar.gz", hash = "sha256:e39b4732d65fbdcde189ae76cf7cd48aeae72919dea1fdfc16593be016256b45"}, ] [package.dependencies] @@ -754,19 +1614,198 @@ click = ">=8.2.1" rich = ">=12.3.0" shellingham = ">=1.3.0" +[[package]] +name = "types-docker" +version = "7.1.0.20260109" +description = "Typing stubs for docker" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_docker-7.1.0.20260109-py3-none-any.whl", hash = "sha256:001a5a377d3fb287b7279cf4265b8ba3857e7d4203a16ab03e6e512f68f2f3d4"}, + {file = "types_docker-7.1.0.20260109.tar.gz", hash = "sha256:b36ef355ec9ba8bf29bcc4e32cc61dd9138ce4d8352c599c8fbc65f1a3e87b57"}, +] + +[package.dependencies] +types-paramiko = "*" +types-requests = "*" +urllib3 = ">=2" + +[[package]] +name = "types-paramiko" +version = "4.0.0.20250822" +description = "Typing stubs for paramiko" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_paramiko-4.0.0.20250822-py3-none-any.whl", hash = "sha256:55bdb14db75ca89039725ec64ae3fa26b8d57b6991cfb476212fa8f83a59753c"}, + {file = "types_paramiko-4.0.0.20250822.tar.gz", hash = "sha256:1b56b0cbd3eec3d2fd123c9eb2704e612b777e15a17705a804279ea6525e0c53"}, +] + +[package.dependencies] +cryptography = ">=37.0.0" + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250915" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = 
"sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6"}, + {file = "types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3"}, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20260107" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_requests-2.32.4.20260107-py3-none-any.whl", hash = "sha256:b703fe72f8ce5b31ef031264fe9395cac8f46a04661a79f7ed31a80fb308730d"}, + {file = "types_requests-2.32.4.20260107.tar.gz", hash = "sha256:018a11ac158f801bfa84857ddec1650750e393df8a004a8a9ae2a9bec6fcb24f"}, +] + +[package.dependencies] +urllib3 = ">=2" + [[package]] name = "typing-extensions" -version = "4.14.1" +version = "4.15.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"}, + {file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "urllib3" +version = "2.6.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, - {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, +] + +[package.extras] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] + +[[package]] +name = "wrapt" +version = "2.1.1" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "wrapt-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7e927375e43fd5a985b27a8992327c22541b6dede1362fc79df337d26e23604f"}, + {file = "wrapt-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c99544b6a7d40ca22195563b6d8bc3986ee8bb82f272f31f0670fe9440c869"}, + {file = "wrapt-2.1.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b2be3fa5f4efaf16ee7c77d0556abca35f5a18ad4ac06f0ef3904c3399010ce9"}, + {file = "wrapt-2.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67c90c1ae6489a6cb1a82058902caa8006706f7b4e8ff766f943e9d2c8e608d0"}, + {file = "wrapt-2.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:05c0db35ccffd7480143e62df1e829d101c7b86944ae3be7e4869a7efa621f53"}, + {file = "wrapt-2.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0c2ec9f616755b2e1e0bf4d0961f59bb5c2e7a77407e7e2c38ef4f7d2fdde12c"}, + {file = "wrapt-2.1.1-cp310-cp310-win32.whl", hash = "sha256:203ba6b3f89e410e27dbd30ff7dccaf54dcf30fda0b22aa1b82d560c7f9fe9a1"}, + {file = "wrapt-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:6f9426d9cfc2f8732922fc96198052e55c09bb9db3ddaa4323a18e055807410e"}, + {file = "wrapt-2.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:69c26f51b67076b40714cff81bdd5826c0b10c077fb6b0678393a6a2f952a5fc"}, + {file = "wrapt-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c366434a7fb914c7a5de508ed735ef9c133367114e1a7cb91dfb5cd806a1549"}, + {file = "wrapt-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d6a2068bd2e1e19e5a317c8c0b288267eec4e7347c36bc68a6e378a39f19ee7"}, + {file = "wrapt-2.1.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:891ab4713419217b2aed7dd106c9200f64e6a82226775a0d2ebd6bef2ebd1747"}, + {file = 
"wrapt-2.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c8ef36a0df38d2dc9d907f6617f89e113c5892e0a35f58f45f75901af0ce7d81"}, + {file = "wrapt-2.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76e9af3ebd86f19973143d4d592cbf3e970cf3f66ddee30b16278c26ae34b8ab"}, + {file = "wrapt-2.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ff562067485ebdeaef2fa3fe9b1876bc4e7b73762e0a01406ad81e2076edcebf"}, + {file = "wrapt-2.1.1-cp311-cp311-win32.whl", hash = "sha256:9e60a30aa0909435ec4ea2a3c53e8e1b50ac9f640c0e9fe3f21fd248a22f06c5"}, + {file = "wrapt-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:7d79954f51fcf84e5ec4878ab4aea32610d70145c5bbc84b3370eabfb1e096c2"}, + {file = "wrapt-2.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:d3ffc6b0efe79e08fd947605fd598515aebefe45e50432dc3b5cd437df8b1ada"}, + {file = "wrapt-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab8e3793b239db021a18782a5823fcdea63b9fe75d0e340957f5828ef55fcc02"}, + {file = "wrapt-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c0300007836373d1c2df105b40777986accb738053a92fe09b615a7a4547e9f"}, + {file = "wrapt-2.1.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2b27c070fd1132ab23957bcd4ee3ba707a91e653a9268dc1afbd39b77b2799f7"}, + {file = "wrapt-2.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b0e36d845e8b6f50949b6b65fc6cd279f47a1944582ed4ec8258cd136d89a64"}, + {file = "wrapt-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4aeea04a9889370fcfb1ef828c4cc583f36a875061505cd6cd9ba24d8b43cc36"}, + {file = "wrapt-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d88b46bb0dce9f74b6817bc1758ff2125e1ca9e1377d62ea35b6896142ab6825"}, + {file = "wrapt-2.1.1-cp312-cp312-win32.whl", hash = "sha256:63decff76ca685b5c557082dfbea865f3f5f6d45766a89bff8dc61d336348833"}, + {file = 
"wrapt-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:b828235d26c1e35aca4107039802ae4b1411be0fe0367dd5b7e4d90e562fcbcd"}, + {file = "wrapt-2.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:75128507413a9f1bcbe2db88fd18fbdbf80f264b82fa33a6996cdeaf01c52352"}, + {file = "wrapt-2.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9646e17fa7c3e2e7a87e696c7de66512c2b4f789a8db95c613588985a2e139"}, + {file = "wrapt-2.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:428cfc801925454395aa468ba7ddb3ed63dc0d881df7b81626cdd433b4e2b11b"}, + {file = "wrapt-2.1.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5797f65e4d58065a49088c3b32af5410751cd485e83ba89e5a45e2aa8905af98"}, + {file = "wrapt-2.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a2db44a71202c5ae4bb5f27c6d3afbc5b23053f2e7e78aa29704541b5dad789"}, + {file = "wrapt-2.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8d5350c3590af09c1703dd60ec78a7370c0186e11eaafb9dda025a30eee6492d"}, + {file = "wrapt-2.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d9b076411bed964e752c01b49fd224cc385f3a96f520c797d38412d70d08359"}, + {file = "wrapt-2.1.1-cp313-cp313-win32.whl", hash = "sha256:0bb7207130ce6486727baa85373503bf3334cc28016f6928a0fa7e19d7ecdc06"}, + {file = "wrapt-2.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:cbfee35c711046b15147b0ae7db9b976f01c9520e6636d992cd9e69e5e2b03b1"}, + {file = "wrapt-2.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:7d2756061022aebbf57ba14af9c16e8044e055c22d38de7bf40d92b565ecd2b0"}, + {file = "wrapt-2.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4814a3e58bc6971e46baa910ecee69699110a2bf06c201e24277c65115a20c20"}, + {file = "wrapt-2.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:106c5123232ab9b9f4903692e1fa0bdc231510098f04c13c3081f8ad71c3d612"}, + {file = 
"wrapt-2.1.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1a40b83ff2535e6e56f190aff123821eea89a24c589f7af33413b9c19eb2c738"}, + {file = "wrapt-2.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:789cea26e740d71cf1882e3a42bb29052bc4ada15770c90072cb47bf73fb3dbf"}, + {file = "wrapt-2.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ba49c14222d5e5c0ee394495a8655e991dc06cbca5398153aefa5ac08cd6ccd7"}, + {file = "wrapt-2.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ac8cda531fe55be838a17c62c806824472bb962b3afa47ecbd59b27b78496f4e"}, + {file = "wrapt-2.1.1-cp313-cp313t-win32.whl", hash = "sha256:b8af75fe20d381dd5bcc9db2e86a86d7fcfbf615383a7147b85da97c1182225b"}, + {file = "wrapt-2.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:45c5631c9b6c792b78be2d7352129f776dd72c605be2c3a4e9be346be8376d83"}, + {file = "wrapt-2.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:da815b9263947ac98d088b6414ac83507809a1d385e4632d9489867228d6d81c"}, + {file = "wrapt-2.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:9aa1765054245bb01a37f615503290d4e207e3fd59226e78341afb587e9c1236"}, + {file = "wrapt-2.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:feff14b63a6d86c1eee33a57f77573649f2550935981625be7ff3cb7342efe05"}, + {file = "wrapt-2.1.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:81fc5f22d5fcfdbabde96bb3f5379b9f4476d05c6d524d7259dc5dfb501d3281"}, + {file = "wrapt-2.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:951b228ecf66def855d22e006ab9a1fc12535111ae7db2ec576c728f8ddb39e8"}, + {file = "wrapt-2.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ddf582a95641b9a8c8bd643e83f34ecbbfe1b68bc3850093605e469ab680ae3"}, + {file = "wrapt-2.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:fc5c500966bf48913f795f1984704e6d452ba2414207b15e1f8c339a059d5b16"}, + {file = "wrapt-2.1.1-cp314-cp314-win32.whl", hash = "sha256:4aa4baadb1f94b71151b8e44a0c044f6af37396c3b8bcd474b78b49e2130a23b"}, + {file = "wrapt-2.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:860e9d3fd81816a9f4e40812f28be4439ab01f260603c749d14be3c0a1170d19"}, + {file = "wrapt-2.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:3c59e103017a2c1ea0ddf589cbefd63f91081d7ce9d491d69ff2512bb1157e23"}, + {file = "wrapt-2.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9fa7c7e1bee9278fc4f5dd8275bc8d25493281a8ec6c61959e37cc46acf02007"}, + {file = "wrapt-2.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:39c35e12e8215628984248bd9c8897ce0a474be2a773db207eb93414219d8469"}, + {file = "wrapt-2.1.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:94ded4540cac9125eaa8ddf5f651a7ec0da6f5b9f248fe0347b597098f8ec14c"}, + {file = "wrapt-2.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da0af328373f97ed9bdfea24549ac1b944096a5a71b30e41c9b8b53ab3eec04a"}, + {file = "wrapt-2.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4ad839b55f0bf235f8e337ce060572d7a06592592f600f3a3029168e838469d3"}, + {file = "wrapt-2.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0d89c49356e5e2a50fa86b40e0510082abcd0530f926cbd71cf25bee6b9d82d7"}, + {file = "wrapt-2.1.1-cp314-cp314t-win32.whl", hash = "sha256:f4c7dd22cf7f36aafe772f3d88656559205c3af1b7900adfccb70edeb0d2abc4"}, + {file = "wrapt-2.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:f76bc12c583ab01e73ba0ea585465a41e48d968f6d1311b4daec4f8654e356e3"}, + {file = "wrapt-2.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7ea74fc0bec172f1ae5f3505b6655c541786a5cabe4bbc0d9723a56ac32eb9b9"}, + {file = "wrapt-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e03b3d486eb39f5d3f562839f59094dcee30c4039359ea15768dc2214d9e07c"}, + {file = 
"wrapt-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0fdf3073f488ce4d929929b7799e3b8c52b220c9eb3f4a5a51e2dc0e8ff07881"}, + {file = "wrapt-2.1.1-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0cb4f59238c6625fae2eeb72278da31c9cfba0ff4d9cbe37446b73caa0e9bcf7"}, + {file = "wrapt-2.1.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f794a1c148871b714cb566f5466ec8288e0148a1c417550983864b3981737cd"}, + {file = "wrapt-2.1.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:95ef3866631c6da9ce1fc0f1e17b90c4c0aa6d041fc70a11bc90733aee122e1a"}, + {file = "wrapt-2.1.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:66bc1b2446f01cbbd3c56b79a3a8435bcd4178ac4e06b091913f7751a7f528b8"}, + {file = "wrapt-2.1.1-cp39-cp39-win32.whl", hash = "sha256:1b9e08e57cabc32972f7c956d10e85093c5da9019faa24faf411e7dd258e528c"}, + {file = "wrapt-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:e75ad48c3cca739f580b5e14c052993eb644c7fa5b4c90aa51193280b30875ae"}, + {file = "wrapt-2.1.1-cp39-cp39-win_arm64.whl", hash = "sha256:9ccd657873b7f964711447d004563a2bc08d1476d7a1afcad310f3713e6f50f4"}, + {file = "wrapt-2.1.1-py3-none-any.whl", hash = "sha256:3b0f4629eb954394a3d7c7a1c8cca25f0b07cefe6aa8545e862e9778152de5b7"}, + {file = "wrapt-2.1.1.tar.gz", hash = "sha256:5fdcb09bf6db023d88f312bd0767594b414655d58090fc1c46b3414415f67fac"}, ] +[package.extras] +dev = ["pytest", "setuptools"] + [metadata] lock-version = "2.1" -python-versions = "^3.13" -content-hash = "030613e4f1d70f66974965b38509bb62b6c9c76de1c3bd4c460e839d1888d2e4" +python-versions = ">=3.14" +content-hash = "e39a9da5ef2d959c382afeee4f8d962c5eab8977f8a4eddb228fe382f122e3aa" diff --git a/pyproject.toml b/pyproject.toml index 69dae8f..15bbb5e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,35 +1,63 @@ -[tool.poetry] -name = "ssb" -version = "0.1.0" -description = "Simple Safe Backup - A secure backup solution" -authors = ["Sami 
Dalouche "] +[project] +name = "nbkp" +description = "Nomad Backup: An rsync-based backup tool for nomadic setups where sources and destinations aren't always available — laptops on the move, removable drives, home servers behind changing networks. Sentinel files ensure backups only run when volumes are genuinely present, with optional btrfs or hard-link snapshots for point-in-time recovery." readme = "README.md" -packages = [{include = "ssb"}] +requires-python = ">=3.14" +license = "Apache-2.0" +license-files = ["LICENSE"] +authors = [{ name = "Sami Dalouche", email = "sami.dalouche@gmail.com" }] +dependencies = [ + "fabric (>=3.0,<4.0)", + "jinja2 (>=3.0,<4.0)", + "pydantic (>=2.12.5,<3.0.0)", + "pyyaml (>=6.0,<7.0)", + "typer (>=0.24.0,<1.0.0)", +] +dynamic = ["version"] + +[tool.poetry.requires-plugins] +poetry-dynamic-versioning = { version = ">=1.0.0,<2.0.0", extras = ["plugin"] } -[tool.poetry.scripts] -ssb = "ssb.cli:main" +[tool.poetry-dynamic-versioning] +enable = true -[tool.poetry.dependencies] -python = "^3.13" -cryptography = "^46.0.0" -typer = "^0.24.0" +[project.scripts] +nbkp = "nbkp.cli:main" +nbkp-test = "nbkp.testcli:main" + +[tool.poetry] +packages = [{ include = "nbkp" }] +version = "0.0.0" [tool.poetry.group.dev.dependencies] -pytest = "^9.0.0" black = "^26.0.0" flake8 = "^7.0.0" mypy = "^1.9.0" +pytest = "^9.0.0" +testcontainers = "^4.0.0" +types-docker = "^7.1.0.20260109" +types-paramiko = "^4.0.0.20250822" +types-pyyaml = "^6.0" [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"] +build-backend = "poetry_dynamic_versioning.backend" [tool.black] line-length = 79 -target-version = ['py313'] +target-version = ["py314"] + +[tool.pytest.ini_options] +markers = [ + "integration: Integration tests requiring Docker", +] [tool.mypy] -python_version = "3.13" +python_version = "3.14" warn_return_any = true warn_unused_configs = true 
disallow_untyped_defs = true + +[virtualenvs] +# works jointly with mise to ensure the correct Python version is used in the virtualenv +in-project = true diff --git a/ssb/__init__.py b/ssb/__init__.py deleted file mode 100644 index b0c13eb..0000000 --- a/ssb/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Simple Safe Backup (SSB) - A secure backup solution. - -This package provides tools for creating secure, encrypted backups -of files and directories. -""" - -__version__ = "0.1.0" -__author__ = "Your Name" -__email__ = "your.email@example.com" - -from .backup import BackupManager -from .encryption import EncryptionManager - -__all__ = ["BackupManager", "EncryptionManager"] diff --git a/ssb/backup.py b/ssb/backup.py deleted file mode 100644 index 8b1a96f..0000000 --- a/ssb/backup.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Backup management functionality for SSB. -""" - -import shutil -from pathlib import Path -from typing import List, Optional -from .encryption import EncryptionManager - - -class BackupManager: - """Manages backup operations for files and directories.""" - - def __init__( - self, - backup_dir: str, - encryption_manager: Optional[EncryptionManager] = None, - ): - """ - Initialize the backup manager. - - Args: - backup_dir: Directory where backups will be stored - encryption_manager: Optional encryption manager for secure backups - """ - self.backup_dir = Path(backup_dir) - self.backup_dir.mkdir(parents=True, exist_ok=True) - self.encryption_manager = encryption_manager or EncryptionManager() - - def create_backup( - self, source_path: str, backup_name: Optional[str] = None - ) -> str: - """ - Create a backup of the specified source. 
- - Args: - source_path: Path to the file or directory to backup - backup_name: Optional name for the backup (defaults to source name) - - Returns: - Path to the created backup - """ - source = Path(source_path) - if not source.exists(): - raise FileNotFoundError( - f"Source path does not exist: {source_path}" - ) - - if backup_name is None: - backup_name = source.name - - backup_path = self.backup_dir / backup_name - - # Check if source and destination are the same - if source.resolve() == backup_path.resolve(): - raise ValueError( - f"Cannot backup to the same location: {source_path}" - ) - - if source.is_file(): - return self._backup_file(source, backup_path) - elif source.is_dir(): - return self._backup_directory(source, backup_path) - else: - raise ValueError( - f"Source path is neither a file nor directory: {source_path}" - ) - - def _backup_file(self, source: Path, backup_path: Path) -> str: - """Backup a single file.""" - shutil.copy2(source, backup_path) - return str(backup_path) - - def _backup_directory(self, source: Path, backup_path: Path) -> str: - """Backup a directory.""" - shutil.copytree(source, backup_path, dirs_exist_ok=True) - return str(backup_path) - - def list_backups(self) -> List[str]: - """List all available backups.""" - return [ - item.name for item in self.backup_dir.iterdir() if item.exists() - ] - - def restore_backup(self, backup_name: str, destination: str) -> str: - """ - Restore a backup to the specified destination. 
- - Args: - backup_name: Name of the backup to restore - destination: Path where the backup should be restored - - Returns: - Path to the restored backup - """ - backup_path = self.backup_dir / backup_name - if not backup_path.exists(): - raise FileNotFoundError(f"Backup not found: {backup_name}") - - dest_path = Path(destination) - - if backup_path.is_file(): - shutil.copy2(backup_path, dest_path) - else: - shutil.copytree(backup_path, dest_path, dirs_exist_ok=True) - - return str(dest_path) diff --git a/ssb/cli.py b/ssb/cli.py deleted file mode 100644 index e5896fd..0000000 --- a/ssb/cli.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Command-line interface for SSB. -""" - -from pathlib import Path -from typing import Optional - -import typer - -from .backup import BackupManager -from .encryption import EncryptionManager - -app = typer.Typer( - name="ssb", - help="Simple Safe Backup - A secure backup solution", - no_args_is_help=True, -) - - -@app.command() -def backup( - source: str = typer.Argument(..., help="Source file or directory to backup"), - backup_dir: str = typer.Argument(..., help="Directory to store backups"), - name: Optional[str] = typer.Option(None, "--name", "-n", help="Name for the backup"), - encrypt: bool = typer.Option(False, "--encrypt", "-e", help="Encrypt the backup"), - password: Optional[str] = typer.Option(None, "--password", "-p", help="Password for encryption"), -) -> None: - """Create a backup of the specified source.""" - try: - source_path = Path(source) - if not source_path.exists(): - typer.echo(f"Error: Source not found: {source}", err=True) - raise typer.Exit(1) - - # Create encryption manager if needed - encryption_manager = None - if encrypt: - if not password: - password = typer.prompt("Enter encryption password", hide_input=True) - if password: # Ensure password is not None - encryption_manager = EncryptionManager.from_password(password) - - # Create backup - backup_manager = BackupManager(backup_dir, encryption_manager) - 
backup_path = backup_manager.create_backup(source, name) - - typer.echo(f"Backup created successfully: {backup_path}") - - except Exception as e: - typer.echo(f"Error: {e}", err=True) - raise typer.Exit(1) - - -@app.command() -def restore( - backup_name: str = typer.Argument(..., help="Name of the backup to restore"), - backup_dir: str = typer.Argument(..., help="Directory containing backups"), - destination: str = typer.Argument(..., help="Destination path for restoration"), - password: Optional[str] = typer.Option(None, "--password", "-p", help="Password for decryption"), -) -> None: - """Restore a backup to the specified destination.""" - try: - backup_manager = BackupManager(backup_dir) - - # Check if backup exists - backups = backup_manager.list_backups() - if backup_name not in backups: - typer.echo(f"Error: Backup not found: {backup_name}", err=True) - raise typer.Exit(1) - - # Restore backup - restored_path = backup_manager.restore_backup(backup_name, destination) - typer.echo(f"Backup restored successfully: {restored_path}") - - except Exception as e: - typer.echo(f"Error: {e}", err=True) - raise typer.Exit(1) - - -@app.command() -def list_backups( - backup_dir: str = typer.Argument(..., help="Directory containing backups"), -) -> None: - """List all available backups.""" - try: - backup_manager = BackupManager(backup_dir) - backups = backup_manager.list_backups() - - if not backups: - typer.echo("No backups found.") - else: - typer.echo("Available backups:") - for backup in sorted(backups): - typer.echo(f" - {backup}") - - except Exception as e: - typer.echo(f"Error: {e}", err=True) - raise typer.Exit(1) - - -def main() -> None: - """Main CLI entry point.""" - app() - - -if __name__ == "__main__": - main() diff --git a/ssb/encryption.py b/ssb/encryption.py deleted file mode 100644 index 055b8af..0000000 --- a/ssb/encryption.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -Encryption functionality for secure backups. 
-""" - -import os -from pathlib import Path -from typing import Optional -from cryptography.fernet import Fernet -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC -import base64 - - -class EncryptionManager: - """Manages encryption and decryption of backup files.""" - - def __init__(self, key: Optional[bytes] = None): - """ - Initialize the encryption manager. - - Args: - key: Optional encryption key (will generate one if not provided) - """ - if key is None: - self.key = Fernet.generate_key() - else: - self.key = key - self.cipher = Fernet(self.key) - - @classmethod - def from_password( - cls, password: str, salt: Optional[bytes] = None - ) -> "EncryptionManager": - """ - Create an encryption manager from a password. - - Args: - password: Password to derive the key from - salt: Optional salt for key derivation - - Returns: - EncryptionManager instance - """ - if salt is None: - salt = os.urandom(16) - - kdf = PBKDF2HMAC( - algorithm=hashes.SHA256(), - length=32, - salt=salt, - iterations=100000, - ) - key = base64.urlsafe_b64encode(kdf.derive(password.encode())) - return cls(key) - - def encrypt_file(self, input_path: str, output_path: str) -> None: - """ - Encrypt a file. - - Args: - input_path: Path to the file to encrypt - output_path: Path where the encrypted file will be saved - """ - input_file = Path(input_path) - output_file = Path(output_path) - - if not input_file.exists(): - raise FileNotFoundError(f"Input file not found: {input_path}") - - with open(input_file, "rb") as f: - data = f.read() - - encrypted_data = self.cipher.encrypt(data) - - with open(output_file, "wb") as f: - f.write(encrypted_data) - - def decrypt_file(self, input_path: str, output_path: str) -> None: - """ - Decrypt a file. 
- - Args: - input_path: Path to the encrypted file - output_path: Path where the decrypted file will be saved - """ - input_file = Path(input_path) - output_file = Path(output_path) - - if not input_file.exists(): - raise FileNotFoundError(f"Input file not found: {input_path}") - - with open(input_file, "rb") as f: - encrypted_data = f.read() - - try: - decrypted_data = self.cipher.decrypt(encrypted_data) - except Exception as e: - raise ValueError(f"Failed to decrypt file: {e}") - - with open(output_file, "wb") as f: - f.write(decrypted_data) - - def get_key(self) -> bytes: - """Get the current encryption key.""" - return self.key - - def save_key(self, key_path: str) -> None: - """ - Save the encryption key to a file. - - Args: - key_path: Path where the key will be saved - """ - with open(key_path, "wb") as f: - f.write(self.key) - - @classmethod - def load_key(cls, key_path: str) -> "EncryptionManager": - """ - Load an encryption manager from a saved key file. - - Args: - key_path: Path to the saved key file - - Returns: - EncryptionManager instance - """ - with open(key_path, "rb") as f: - key = f.read() - return cls(key) diff --git a/tests/__init__.py b/tests/__init__.py index 93ebb05..8bbb5e6 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1 @@ -# Tests package for ssbng +# Tests package for nbkp diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..f8aad6d --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,143 @@ +"""Shared test fixtures.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest +import yaml + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) + + +def config_to_yaml(config: Config) -> str: + """Convert a Config to a YAML string.""" + return yaml.safe_dump( + config.model_dump(by_alias=True), + default_flow_style=False, + sort_keys=False, + ) + + +def _sample_config() -> Config: 
+ """Build the full sample Config.""" + return Config( + ssh_endpoints={ + "nas-server": SshEndpoint( + slug="nas-server", + host="nas.example.com", + port=5022, + user="backup", + key="~/.ssh/key", + ), + }, + volumes={ + "local-data": LocalVolume(slug="local-data", path="/mnt/data"), + "nas": RemoteVolume( + slug="nas", + ssh_endpoint="nas-server", + path="/volume1/backups", + ), + }, + syncs={ + "photos-to-nas": SyncConfig( + slug="photos-to-nas", + source=SyncEndpoint(volume="local-data", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="nas", + subdir="photos-backup", + ), + enabled=True, + filters=["+ *.jpg", "- *.tmp"], + filter_file=("~/.config/nbkp/filters/photos.rules"), + ), + }, + ) + + +def _sample_minimal_config() -> Config: + """Build the minimal sample Config.""" + return Config( + volumes={ + "src": LocalVolume(slug="src", path="/src"), + "dst": LocalVolume(slug="dst", path="/dst"), + }, + syncs={ + "s1": SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ), + }, + ) + + +@pytest.fixture() +def sample_config_file(tmp_path: Path) -> Path: + """Write sample YAML config to a temp file.""" + p = tmp_path / "config.yaml" + p.write_text(config_to_yaml(_sample_config())) + return p + + +@pytest.fixture() +def sample_minimal_config_file(tmp_path: Path) -> Path: + """Write minimal YAML config to a temp file.""" + p = tmp_path / "config.yaml" + p.write_text(config_to_yaml(_sample_minimal_config())) + return p + + +@pytest.fixture() +def local_volume() -> LocalVolume: + return LocalVolume(slug="local-data", path="/mnt/data") + + +@pytest.fixture() +def ssh_endpoint() -> SshEndpoint: + return SshEndpoint( + slug="nas-server", + host="nas.example.com", + port=5022, + user="backup", + key="~/.ssh/key", + ) + + +@pytest.fixture() +def ssh_endpoint_minimal() -> SshEndpoint: + return SshEndpoint( + slug="nas2-server", + host="nas2.example.com", + ) + + +@pytest.fixture() +def 
remote_volume() -> RemoteVolume: + return RemoteVolume( + slug="nas", + ssh_endpoint="nas-server", + path="/volume1/backups", + ) + + +@pytest.fixture() +def remote_volume_minimal() -> RemoteVolume: + return RemoteVolume( + slug="nas2", + ssh_endpoint="nas2-server", + path="/backups", + ) + + +@pytest.fixture() +def sample_config() -> Config: + return _sample_config() diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..910a25f --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,324 @@ +"""Integration test fixtures -- Docker SSH server with rsync + btrfs.""" + +from __future__ import annotations + +import shutil +import tempfile +import uuid +from pathlib import Path +from typing import Any, Generator + +import docker as dockerlib +import pytest + +from nbkp.config import RemoteVolume, SshEndpoint +from nbkp.testkit.docker import ( # noqa: F401 + DOCKER_DIR, + REMOTE_BACKUP_PATH, + REMOTE_BTRFS_PATH, + create_sentinels, + create_test_ssh_endpoint, + generate_ssh_keypair, + prepare_btrfs_snapshot_based_backup_dst, + prepare_hardlinks_snapshot_based_backup_dst, + ssh_exec, + wait_for_ssh, +) + + +def _docker_available() -> bool: + """Check if Docker is available and running.""" + try: + client = dockerlib.from_env() + client.ping() + return True + except dockerlib.errors.DockerException: + return False + + +pytestmark = pytest.mark.skipif( + not _docker_available(), reason="Docker not available" +) + + +@pytest.fixture(scope="session") +def ssh_key_pair() -> Generator[tuple[Path, Path], None, None]: + """Generate an ephemeral ed25519 SSH key pair for tests.""" + tmpdir = Path(tempfile.mkdtemp(prefix="nbkp-test-ssh-")) + pair = generate_ssh_keypair(tmpdir) + + yield pair + + shutil.rmtree(tmpdir, ignore_errors=True) + + +@pytest.fixture(scope="session") +def _docker_image() -> str: + 
"""Build the Docker image and return its tag.""" + from testcontainers.core.image import DockerImage + + image = DockerImage( + path=str(DOCKER_DIR), + tag="nbkp-test-server:latest", + ) + image.build() + return str(image) + + +@pytest.fixture(scope="session") +def _docker_network() -> Generator[Any, None, None]: + """Create a Docker bridge network for inter-container comms.""" + client = dockerlib.from_env() + name = f"nbkp-test-{uuid.uuid4().hex[:8]}" + network = client.networks.create(name, driver="bridge") + + yield network + + try: + network.remove() + except dockerlib.errors.APIError: + pass + + +@pytest.fixture(scope="session") +def docker_container( + ssh_key_pair: tuple[Path, Path], + _docker_image: str, + _docker_network: Any, +) -> Generator[SshEndpoint, None, None]: + """Start Docker container and yield SshEndpoint.""" + from testcontainers.core.container import DockerContainer + from testcontainers.core.wait_strategies import ( + LogMessageWaitStrategy, + ) + + private_key, public_key = ssh_key_pair + + wait_strategy = LogMessageWaitStrategy( + "Server listening", + ).with_startup_timeout(30) + + container = ( + DockerContainer(_docker_image) + .with_exposed_ports(22) + .with_volume_mapping( + str(public_key), + "/mnt/ssh-authorized-keys", + "ro", + ) + .with_kwargs(privileged=True) + .waiting_for(wait_strategy) + ) + container.start() + + # Connect to network with alias for bastion access + wrapped = container.get_wrapped_container() + _docker_network.connect(wrapped, aliases=["backup-server"]) + + server = create_test_ssh_endpoint( + "test-server", + container.get_container_host_ip(), + int(container.get_exposed_port(22)), + private_key, + ) + + wait_for_ssh(server, timeout=30) + + yield server + + container.stop() + + +@pytest.fixture(scope="session") +def bastion_container( + ssh_key_pair: tuple[Path, Path], + _docker_image: str, + _docker_network: Any, +) -> Generator[SshEndpoint, None, None]: + """Start a bastion (jump proxy) container.""" + from 
testcontainers.core.container import DockerContainer + from testcontainers.core.wait_strategies import ( + LogMessageWaitStrategy, + ) + + private_key, public_key = ssh_key_pair + + wait_strategy = LogMessageWaitStrategy( + "Server listening", + ).with_startup_timeout(30) + + container = ( + DockerContainer(_docker_image) + .with_exposed_ports(22) + .with_volume_mapping( + str(public_key), + "/mnt/ssh-authorized-keys", + "ro", + ) + .with_env("NBKP_BASTION_ONLY", "1") + .waiting_for(wait_strategy) + ) + container.start() + + wrapped = container.get_wrapped_container() + _docker_network.connect(wrapped) + + server = create_test_ssh_endpoint( + "bastion", + container.get_container_host_ip(), + int(container.get_exposed_port(22)), + private_key, + ) + + wait_for_ssh(server, timeout=30) + yield server + + container.stop() + + +@pytest.fixture(scope="session") +def proxied_ssh_endpoint( + ssh_key_pair: tuple[Path, Path], + bastion_container: SshEndpoint, + docker_container: SshEndpoint, +) -> SshEndpoint: + """SshEndpoint that routes through the bastion.""" + private_key, _ = ssh_key_pair + return create_test_ssh_endpoint( + "proxied-server", + "backup-server", + 22, + private_key, + proxy_jump="bastion", + ) + + +@pytest.fixture(scope="session") +def ssh_endpoint( + docker_container: SshEndpoint, +) -> SshEndpoint: + """SshEndpoint pointing at the Docker container.""" + return docker_container + + +@pytest.fixture(scope="session") +def remote_volume() -> RemoteVolume: + """RemoteVolume pointing at /srv/backups on the container.""" + return RemoteVolume( + slug="test-remote", + ssh_endpoint="test-server", + path=REMOTE_BACKUP_PATH, + ) + + +@pytest.fixture(scope="session") +def remote_btrfs_volume() -> RemoteVolume: + """RemoteVolume pointing at /srv/btrfs-backups.""" + return RemoteVolume( + slug="test-btrfs", + ssh_endpoint="test-server", + path=REMOTE_BTRFS_PATH, + ) + + +@pytest.fixture(scope="session") +def remote_hardlink_volume() -> RemoteVolume: + 
"""RemoteVolume pointing at /srv/backups.""" + return RemoteVolume( + slug="test-hl", + ssh_endpoint="test-server", + path=REMOTE_BACKUP_PATH, + ) + + +# ── Cleanup ───────────────────────────────────────────────────── + + +@pytest.fixture(autouse=True) +def _cleanup_remote( + request: pytest.FixtureRequest, +) -> Generator[None, None, None]: + """Clean up /srv/backups and /srv/btrfs-backups between tests.""" + yield + + # Only clean up if ssh_endpoint was used by this test + if "ssh_endpoint" not in request.fixturenames: + return + + server: SshEndpoint = request.getfixturevalue("ssh_endpoint") + + def run(cmd: str) -> None: + ssh_exec(server, cmd, check=False) + + # Clean /srv/backups (glob * skips dotfiles, so also remove + # sentinels) + run(f"rm -rf {REMOTE_BACKUP_PATH}/*") + run(f"find {REMOTE_BACKUP_PATH}" " -name '.nbkp-*' -delete") + + # Clean btrfs paths — delete snapshot subvolumes first, + # then latest + snapshots_result = ssh_exec( + server, + f"ls {REMOTE_BTRFS_PATH}/snapshots 2>/dev/null || true", + check=False, + ) + if snapshots_result.stdout.strip(): + for snap in snapshots_result.stdout.strip().split("\n"): + snap = snap.strip() + if snap: + run( + "btrfs property set" + f" {REMOTE_BTRFS_PATH}/snapshots/{snap}" + " ro false 2>/dev/null || true" + ) + run( + "btrfs subvolume delete" + f" {REMOTE_BTRFS_PATH}/snapshots/{snap}" + " 2>/dev/null || true" + ) + + # Delete latest subvolume if it exists + run( + "btrfs property set" + f" {REMOTE_BTRFS_PATH}/latest ro false" + " 2>/dev/null || true" + ) + run( + "btrfs subvolume delete" + f" {REMOTE_BTRFS_PATH}/latest" + " 2>/dev/null || true" + ) + run(f"rm -rf {REMOTE_BTRFS_PATH}/snapshots" " 2>/dev/null || true") + + # Clean chain subpath (btrfs subvolume with its own + # latest + snapshots, used by chain test) + chain = f"{REMOTE_BTRFS_PATH}/chain" + chain_snaps = ssh_exec( + server, + f"ls {chain}/snapshots 2>/dev/null || true", + check=False, + ) + if chain_snaps.stdout.strip(): + for snap in 
chain_snaps.stdout.strip().split("\n"): + snap = snap.strip() + if snap: + run( + "btrfs property set" + f" {chain}/snapshots/{snap}" + " ro false 2>/dev/null || true" + ) + run( + "btrfs subvolume delete" + f" {chain}/snapshots/{snap}" + " 2>/dev/null || true" + ) + run( + "btrfs property set" f" {chain}/latest ro false" " 2>/dev/null || true" + ) + run(f"btrfs subvolume delete {chain}/latest" " 2>/dev/null || true") + run(f"btrfs subvolume delete {chain}" " 2>/dev/null || true") + + # Clean bare subpath on btrfs (regular dir, used by chain test) + run(f"rm -rf {REMOTE_BTRFS_PATH}/bare" " 2>/dev/null || true") + + run(f"find {REMOTE_BTRFS_PATH}" " -name '.nbkp-*' -delete") diff --git a/tests/integration/test_btrfs.py b/tests/integration/test_btrfs.py new file mode 100644 index 0000000..f928b2e --- /dev/null +++ b/tests/integration/test_btrfs.py @@ -0,0 +1,347 @@ +"""Integration tests: btrfs snapshots via remote Docker container.""" + +from __future__ import annotations + +import time +from pathlib import Path + +import pytest + +from nbkp.sync.btrfs import ( + create_snapshot, + get_latest_snapshot, + list_snapshots, + prune_snapshots, +) +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BTRFS_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +def _make_btrfs_config( + src_path: str, + remote_btrfs_volume: RemoteVolume, + ssh_endpoint: SshEndpoint, +) -> tuple[SyncConfig, Config, ResolvedEndpoints]: + """Build btrfs config and create seed sentinels.""" + src_vol = LocalVolume(slug="src", path=src_path) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + 
btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={ + "src": src_vol, + "dst": remote_btrfs_volume, + }, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + return sync, config, resolved + + +class TestBtrfsSnapshots: + def test_snapshot_created( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("snapshot me") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + + # Rsync into latest + result = run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + assert result.returncode == 0 + + # Create snapshot + snapshot_path = create_snapshot( + sync, config, resolved_endpoints=resolved + ) + + # Verify snapshot exists + check = ssh_exec(ssh_endpoint, f"test -d {snapshot_path}") + assert check.returncode == 0 + + def test_snapshot_readonly( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("readonly test") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + snapshot_path = create_snapshot( + sync, config, resolved_endpoints=resolved + ) + + # Check readonly property + check = ssh_exec( + ssh_endpoint, + f"btrfs property get {snapshot_path} ro", + ) + assert check.returncode == 0 + assert "ro=true" in check.stdout + + def test_second_sync_link_dest( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + 
src.mkdir() + (src / "file.txt").write_text("v1") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + + # First sync + snapshot + run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + create_snapshot(sync, config, resolved_endpoints=resolved) + + # Small delay to ensure distinct timestamp + time.sleep(0.1) + + # Second sync should use link-dest from first snapshot + latest_snap = get_latest_snapshot( + sync, + config, + resolved_endpoints=resolved, + ) + assert latest_snap is not None + + link_dest = f"../../snapshots/{latest_snap.rsplit('/', 1)[-1]}" + result = run_rsync( + sync, + config, + link_dest=link_dest, + resolved_endpoints=resolved, + dest_suffix="latest", + ) + assert result.returncode == 0 + + # Create second snapshot + snapshot_path = create_snapshot( + sync, config, resolved_endpoints=resolved + ) + check = ssh_exec(ssh_endpoint, f"test -d {snapshot_path}") + assert check.returncode == 0 + + def test_dry_run_no_snapshot( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + # Count existing snapshots before dry run + before = ssh_exec( + ssh_endpoint, + f"ls {REMOTE_BTRFS_PATH}/snapshots 2>/dev/null || true", + ) + count_before = len( + [s for s in before.stdout.strip().split("\n") if s.strip()] + ) + + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("dry run") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + + # Dry-run rsync + result = run_rsync( + sync, + config, + dry_run=True, + resolved_endpoints=resolved, + dest_suffix="latest", + ) + assert result.returncode == 0 + + # Verify no new snapshot was created + after = ssh_exec( + ssh_endpoint, + f"ls {REMOTE_BTRFS_PATH}/snapshots 2>/dev/null || true", + ) + count_after = len( + [s for s in after.stdout.strip().split("\n") if s.strip()] + ) + assert count_after == count_before + + +class 
TestPruneSnapshots: + def _create_snapshots( + self, + sync: SyncConfig, + config: Config, + resolved: ResolvedEndpoints, + count: int, + ) -> list[str]: + """Create multiple snapshots with distinct timestamps.""" + paths: list[str] = [] + for _ in range(count): + path = create_snapshot( + sync, + config, + resolved_endpoints=resolved, + ) + paths.append(path) + time.sleep(0.1) # distinct timestamps + return paths + + def test_prune_deletes_oldest_snapshots( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("prune test") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + + self._create_snapshots(sync, config, resolved, 3) + + # Prune to keep only 1 + deleted = prune_snapshots( + sync, + config, + max_snapshots=1, + resolved_endpoints=resolved, + ) + assert len(deleted) == 2 + + # Verify only 1 snapshot remains + remaining = list_snapshots( + sync, + config, + resolved_endpoints=resolved, + ) + assert len(remaining) == 1 + + def test_prune_dry_run_keeps_all( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("dry run prune") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + + self._create_snapshots(sync, config, resolved, 3) + + # Dry-run prune + deleted = prune_snapshots( + sync, + config, + max_snapshots=1, + dry_run=True, + resolved_endpoints=resolved, + ) + assert len(deleted) == 2 + + # All 3 snapshots still exist + remaining = list_snapshots( + sync, + config, + resolved_endpoints=resolved, + ) + assert len(remaining) == 3 + + def 
test_prune_noop_when_under_limit( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("noop prune") + + sync, config, resolved = _make_btrfs_config( + str(src), remote_btrfs_volume, ssh_endpoint + ) + run_rsync( + sync, config, resolved_endpoints=resolved, dest_suffix="latest" + ) + + self._create_snapshots(sync, config, resolved, 2) + + # Prune with limit higher than count + deleted = prune_snapshots( + sync, + config, + max_snapshots=5, + resolved_endpoints=resolved, + ) + assert deleted == [] + + # All 2 snapshots still exist + remaining = list_snapshots( + sync, + config, + resolved_endpoints=resolved, + ) + assert len(remaining) == 2 diff --git a/tests/integration/test_chain.py b/tests/integration/test_chain.py new file mode 100644 index 0000000..045b7e8 --- /dev/null +++ b/tests/integration/test_chain.py @@ -0,0 +1,276 @@ +"""Integration test: end-to-end chain sync pipeline. 
+ +Verifies data propagates through a 6-hop chain using all +supported sync variants and snapshot modes, with bastion SSH +for all remote access: + + src-local-bare → stage-local-hl-snapshots → + stage-remote-bare → stage-remote-btrfs-snapshots → + stage-remote-btrfs-bare → stage-remote-hl-snapshots → + dst-local-bare +""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from nbkp.check import check_all_syncs +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.runner import run_all_syncs +from nbkp.testkit.docker import ( + REMOTE_BACKUP_PATH, + REMOTE_BTRFS_PATH, +) +from nbkp.testkit.gen.fs import create_seed_sentinels, seed_volume + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + +BTRFS_SNAPSHOTS_PATH = f"{REMOTE_BTRFS_PATH}/snapshots" +BTRFS_BARE_PATH = f"{REMOTE_BTRFS_PATH}/bare" + + +def _build_chain_config( + tmp_path: Path, + bastion_endpoint: SshEndpoint, + proxied_endpoint: SshEndpoint, +) -> Config: + """Build a 6-hop chain config across local and remote volumes. 
+ + Volumes: + src-local-bare — chain origin (bare source) + stage-local-hl-snapshots — HL dest / HL source + stage-remote-bare — bare dest / HL source + stage-remote-btrfs-snapshots — btrfs dest / btrfs source + stage-remote-btrfs-bare — bare dest / HL source + stage-remote-hl-snapshots — HL dest / HL source + dst-local-bare — chain terminus (bare dest) + """ + volumes: dict[str, LocalVolume | RemoteVolume] = { + "src-local-bare": LocalVolume( + slug="src-local-bare", + path=str(tmp_path / "src-local-bare"), + ), + "stage-local-hl-snapshots": LocalVolume( + slug="stage-local-hl-snapshots", + path=str(tmp_path / "stage-local-hl-snapshots"), + ), + "stage-remote-bare": RemoteVolume( + slug="stage-remote-bare", + ssh_endpoint="via-bastion", + path=f"{REMOTE_BACKUP_PATH}/bare", + ), + "stage-remote-btrfs-snapshots": RemoteVolume( + slug="stage-remote-btrfs-snapshots", + ssh_endpoint="via-bastion", + path=BTRFS_SNAPSHOTS_PATH, + ), + "stage-remote-btrfs-bare": RemoteVolume( + slug="stage-remote-btrfs-bare", + ssh_endpoint="via-bastion", + path=BTRFS_BARE_PATH, + ), + "stage-remote-hl-snapshots": RemoteVolume( + slug="stage-remote-hl-snapshots", + ssh_endpoint="via-bastion", + path=f"{REMOTE_BACKUP_PATH}/hl", + ), + "dst-local-bare": LocalVolume( + slug="dst-local-bare", + path=str(tmp_path / "dst-local-bare"), + ), + } + + hl_src = HardLinkSnapshotConfig(enabled=True) + hl_dst = HardLinkSnapshotConfig(enabled=True) + btrfs_src = BtrfsSnapshotConfig(enabled=True) + btrfs_dst = BtrfsSnapshotConfig(enabled=True) + + syncs: dict[str, SyncConfig] = { + # local→local, HL destination + "step-1": SyncConfig( + slug="step-1", + source=SyncEndpoint(volume="src-local-bare"), + destination=SyncEndpoint( + volume="stage-local-hl-snapshots", + hard_link_snapshots=hl_dst, + ), + ), + # local→remote (bastion), bare destination + "step-2": SyncConfig( + slug="step-2", + source=SyncEndpoint( + volume="stage-local-hl-snapshots", + hard_link_snapshots=hl_src, + ), + 
destination=SyncEndpoint( + volume="stage-remote-bare", + ), + ), + # remote→remote same-server (bastion), btrfs destination + "step-3": SyncConfig( + slug="step-3", + source=SyncEndpoint( + volume="stage-remote-bare", + ), + destination=SyncEndpoint( + volume="stage-remote-btrfs-snapshots", + btrfs_snapshots=btrfs_dst, + ), + ), + # remote→remote same-server (bastion), bare dest on btrfs + "step-4": SyncConfig( + slug="step-4", + source=SyncEndpoint( + volume="stage-remote-btrfs-snapshots", + btrfs_snapshots=btrfs_src, + ), + destination=SyncEndpoint( + volume="stage-remote-btrfs-bare", + ), + ), + # remote→remote same-server (bastion), HL destination + "step-5": SyncConfig( + slug="step-5", + source=SyncEndpoint( + volume="stage-remote-btrfs-bare", + ), + destination=SyncEndpoint( + volume="stage-remote-hl-snapshots", + hard_link_snapshots=hl_dst, + ), + ), + # remote (bastion)→local, bare destination + "step-6": SyncConfig( + slug="step-6", + source=SyncEndpoint( + volume="stage-remote-hl-snapshots", + hard_link_snapshots=hl_src, + ), + destination=SyncEndpoint(volume="dst-local-bare"), + ), + } + + return Config( + ssh_endpoints={ + "bastion": bastion_endpoint, + "via-bastion": proxied_endpoint, + }, + volumes=volumes, + syncs=syncs, + ) + + +def _assert_trees_equal(expected: Path, actual: Path) -> None: + """Assert two directory trees have identical structure and content.""" + expected_files = { + p.relative_to(expected): p + for p in sorted(expected.rglob("*")) + if p.is_file() and not p.name.startswith(".nbkp-") + } + actual_files = { + p.relative_to(actual): p + for p in sorted(actual.rglob("*")) + if p.is_file() + } + assert set(expected_files) == set(actual_files), ( + f"tree mismatch:\n" + f" missing: {set(expected_files) - set(actual_files)}\n" + f" extra: {set(actual_files) - set(expected_files)}" + ) + for rel, exp_path in expected_files.items(): + assert ( + actual_files[rel].read_bytes() == exp_path.read_bytes() + ), f"content mismatch: {rel}" + + 
+class TestChainSync: + def test_data_propagates_through_chain( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + bastion_container: SshEndpoint, + proxied_ssh_endpoint: SshEndpoint, + ) -> None: + """Data seeded in src-local-bare arrives at + dst-local-bare after traversing the full chain.""" + # 1. Build config + config = _build_chain_config( + tmp_path, bastion_container, proxied_ssh_endpoint + ) + + # 2. Create btrfs subvolume for the btrfs-snapshots volume + ssh_exec( + ssh_endpoint, + f"btrfs subvolume create {BTRFS_SNAPSHOTS_PATH}", + ) + + # 3. Create sentinels + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + # 4. Seed data in src-local-bare only + src_vol = config.volumes["src-local-bare"] + seed_volume(src_vol) + src = tmp_path / "src-local-bare" + + # 5. Check all syncs — all should be active + resolved = resolve_all_endpoints(config) + _, sync_statuses = check_all_syncs(config, resolved_endpoints=resolved) + for slug, status in sync_statuses.items(): + assert status.active, ( + f"{slug}: " f"{[r.value for r in status.reasons]}" + ) + + # 6. Run all syncs (topologically ordered) + results = run_all_syncs( + config, + sync_statuses, + resolved_endpoints=resolved, + ) + for r in results: + assert r.success, f"{r.sync_slug}: {r.error}" + + # 7. Verify final destination matches source + dst = tmp_path / "dst-local-bare" + _assert_trees_equal(src, dst) + + # 8. Verify topological ordering + slugs = [r.sync_slug for r in results] + for i in range(1, 6): + assert slugs.index(f"step-{i}") < slugs.index(f"step-{i + 1}") + + # 9. 
Verify snapshot artifacts on intermediate volumes + # HL dest (step-1): latest symlink on local-hl + local_hl = tmp_path / "stage-local-hl-snapshots" + assert (local_hl / "latest").is_symlink() + _assert_trees_equal(src, local_hl / "latest") + + # Btrfs dest (step-3): snapshot on remote-btrfs + snap_check = ssh_exec( + ssh_endpoint, + f"ls {BTRFS_SNAPSHOTS_PATH}/snapshots/", + ) + assert snap_check.stdout.strip() + + # HL dest (step-5): latest symlink on remote-hl + hl_check = ssh_exec( + ssh_endpoint, + f"readlink {REMOTE_BACKUP_PATH}/hl/latest", + ) + assert "snapshots/" in hl_check.stdout diff --git a/tests/integration/test_check.py b/tests/integration/test_check.py new file mode 100644 index 0000000..4c23902 --- /dev/null +++ b/tests/integration/test_check.py @@ -0,0 +1,288 @@ +"""Integration tests: volume and sync checks.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from nbkp.check import ( + SyncReason, + _check_btrfs_filesystem, + _check_btrfs_subvolume, + check_sync, + check_volume, +) +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.testkit.docker import REMOTE_BACKUP_PATH, REMOTE_BTRFS_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import create_sentinels, ssh_exec + +pytestmark = pytest.mark.integration + + +class TestLocalVolumeCheck: + def test_local_volume_active(self, tmp_path: Path) -> None: + vol_path = tmp_path / "vol" + vol_path.mkdir() + (vol_path / ".nbkp-vol").touch() + + vol = LocalVolume(slug="local", path=str(vol_path)) + config = Config( + volumes={"local": vol}, + ) + status = check_volume(vol, config) + assert status.active is True + + def test_local_volume_inactive(self, tmp_path: Path) -> None: + vol_path = tmp_path / "vol" + vol_path.mkdir() + # No .nbkp-vol sentinel + + vol = LocalVolume(slug="local", 
path=str(vol_path)) + config = Config( + volumes={"local": vol}, + ) + status = check_volume(vol, config) + assert status.active is False + + +class TestRemoteVolumeCheck: + def test_remote_volume_active( + self, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + create_sentinels(ssh_endpoint, REMOTE_BACKUP_PATH, [".nbkp-vol"]) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"test-remote": remote_volume}, + ) + resolved = resolve_all_endpoints(config) + status = check_volume(remote_volume, resolved) + assert status.active is True + + def test_remote_volume_inactive( + self, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + # No sentinel created + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"test-remote": remote_volume}, + ) + resolved = resolve_all_endpoints(config) + status = check_volume(remote_volume, resolved) + assert status.active is False + + +class TestSyncCheck: + def test_sync_status_active( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + src_path = tmp_path / "src" + src_vol = LocalVolume(slug="src", path=str(src_path)) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": src_vol, "dst": remote_volume}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + src_status = check_volume(src_vol, resolved) + dst_status = check_volume(remote_volume, resolved) + volume_statuses = { + "src": src_status, + "dst": dst_status, + } + + status = check_sync( + sync, + config, + volume_statuses, + resolved_endpoints=resolved, + ) + assert status.active is True + assert status.reasons == [] + + 
+class TestBtrfsFilesystemCheck: + def test_btrfs_path_detected( + self, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"btrfs": remote_btrfs_volume}, + ) + resolved = resolve_all_endpoints(config) + assert _check_btrfs_filesystem(remote_btrfs_volume, resolved) is True + + def test_non_btrfs_path_detected( + self, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"data": remote_volume}, + ) + resolved = resolve_all_endpoints(config) + assert _check_btrfs_filesystem(remote_volume, resolved) is False + + +class TestBtrfsSubvolumeCheck: + def test_subvolume_detected( + self, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + ssh_exec( + ssh_endpoint, + f"btrfs subvolume create {REMOTE_BTRFS_PATH}/test-subvol", + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"btrfs": remote_btrfs_volume}, + ) + resolved = resolve_all_endpoints(config) + assert ( + _check_btrfs_subvolume( + remote_btrfs_volume, + "test-subvol", + resolved, + ) + is True + ) + + # Cleanup + ssh_exec( + ssh_endpoint, + f"btrfs subvolume delete {REMOTE_BTRFS_PATH}/test-subvol", + ) + + def test_regular_dir_not_subvolume( + self, + ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + ssh_exec( + ssh_endpoint, + f"mkdir -p {REMOTE_BTRFS_PATH}/regular-dir", + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"btrfs": remote_btrfs_volume}, + ) + resolved = resolve_all_endpoints(config) + assert ( + _check_btrfs_subvolume( + remote_btrfs_volume, + "regular-dir", + resolved, + ) + is False + ) + + # Cleanup + ssh_exec( + ssh_endpoint, + f"rm -rf {REMOTE_BTRFS_PATH}/regular-dir", + ) + + +class TestSyncCheckBtrfs: + def test_sync_inactive_when_not_subvolume( + self, + tmp_path: Path, + 
ssh_endpoint: SshEndpoint, + remote_btrfs_volume: RemoteVolume, + ) -> None: + # Create a regular directory (not a subvolume) + ssh_exec( + ssh_endpoint, + f"mkdir -p {REMOTE_BTRFS_PATH}/not-a-subvol", + ) + create_sentinels( + ssh_endpoint, + REMOTE_BTRFS_PATH, + [".nbkp-vol"], + ) + create_sentinels( + ssh_endpoint, + f"{REMOTE_BTRFS_PATH}/not-a-subvol", + [".nbkp-dst"], + ) + + src_path = tmp_path / "src" + src_path.mkdir() + (src_path / ".nbkp-vol").touch() + (src_path / ".nbkp-src").touch() + + src_vol = LocalVolume(slug="src", path=str(src_path)) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="not-a-subvol", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={ + "src": src_vol, + "dst": remote_btrfs_volume, + }, + syncs={"test-sync": sync}, + ) + + resolved = resolve_all_endpoints(config) + src_status = check_volume(src_vol, resolved) + dst_status = check_volume(remote_btrfs_volume, resolved) + volume_statuses = { + "src": src_status, + "dst": dst_status, + } + + status = check_sync( + sync, + config, + volume_statuses, + resolved_endpoints=resolved, + ) + assert status.active is False + assert SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME in status.reasons + + # Cleanup + ssh_exec( + ssh_endpoint, + f"rm -rf {REMOTE_BTRFS_PATH}/not-a-subvol", + ) diff --git a/tests/integration/test_hardlinks.py b/tests/integration/test_hardlinks.py new file mode 100644 index 0000000..0a6ae62 --- /dev/null +++ b/tests/integration/test_hardlinks.py @@ -0,0 +1,533 @@ +"""Integration tests: hard-link snapshots via remote Docker container.""" + +from __future__ import annotations + +import time +from pathlib import Path + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + ResolvedEndpoints, + SshEndpoint, + 
SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.btrfs import list_snapshots +from nbkp.sync.hardlinks import ( + cleanup_orphaned_snapshots, + create_snapshot_dir, + prune_snapshots, + read_latest_symlink, + update_latest_symlink, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BACKUP_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +def _make_hl_config( + src_path: str, + remote_hl_volume: RemoteVolume, + ssh_endpoint: SshEndpoint, + max_snapshots: int | None = 5, +) -> tuple[SyncConfig, Config, ResolvedEndpoints]: + """Build hard-link config and create seed sentinels.""" + src_vol = LocalVolume(slug="src", path=src_path) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=max_snapshots + ), + ), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={ + "src": src_vol, + "dst": remote_hl_volume, + }, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + return sync, config, resolved + + +def _do_sync( + src: Path, + ssh_endpoint: SshEndpoint, + remote_hl_volume: RemoteVolume, + max_snapshots: int | None = 5, +) -> tuple[SyncConfig, Config, ResolvedEndpoints, str]: + """rsync + create snapshot dir + update symlink. 
Returns config + tuple and the snapshot name.""" + sync, config, resolved = _make_hl_config( + str(src), remote_hl_volume, ssh_endpoint, max_snapshots + ) + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + snapshot_name = snapshot_path.rsplit("/", 1)[-1] + + result = run_rsync( + sync, + config, + dest_suffix=f"snapshots/{snapshot_name}", + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + update_latest_symlink( + sync, config, snapshot_name, resolved_endpoints=resolved + ) + return sync, config, resolved, snapshot_name + + +class TestHardLinkSnapshots: + def test_snapshot_dir_created( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("snapshot me") + + sync, config, resolved = _make_hl_config( + str(src), remote_hardlink_volume, ssh_endpoint + ) + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + + # Verify directory exists on remote + check = ssh_exec(ssh_endpoint, f"test -d {snapshot_path}") + assert check.returncode == 0 + + def test_rsync_into_snapshot( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "hello.txt").write_text("hello hard-link") + + sync, config, resolved = _make_hl_config( + str(src), remote_hardlink_volume, ssh_endpoint + ) + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + snapshot_name = snapshot_path.rsplit("/", 1)[-1] + + result = run_rsync( + sync, + config, + dest_suffix=f"snapshots/{snapshot_name}", + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + # Verify file arrived in the snapshot dir + check = ssh_exec( + ssh_endpoint, + f"cat {snapshot_path}/hello.txt", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "hello hard-link" + + def 
test_latest_symlink_updated( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("symlink test") + + sync, config, resolved, snap_name = _do_sync( + src, ssh_endpoint, remote_hardlink_volume + ) + + # Verify symlink exists and points to correct snapshot + latest_name = read_latest_symlink( + sync, config, resolved_endpoints=resolved + ) + assert latest_name == snap_name + + # Verify the file is accessible via the symlink + check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/latest/data.txt", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "symlink test" + + def test_second_sync_uses_link_dest( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "file.txt").write_text("v1") + + # First sync + sync, config, resolved, snap1 = _do_sync( + src, ssh_endpoint, remote_hardlink_volume + ) + + time.sleep(0.1) # distinct timestamp + + # Second sync with link-dest from first snapshot + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + snap2 = snapshot_path.rsplit("/", 1)[-1] + + result = run_rsync( + sync, + config, + link_dest=f"../{snap1}", + dest_suffix=f"snapshots/{snap2}", + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + update_latest_symlink(sync, config, snap2, resolved_endpoints=resolved) + + # Verify second snapshot has the file + check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/snapshots/{snap2}/file.txt", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "v1" + + # Verify latest now points to second snapshot + latest_name = read_latest_symlink( + sync, config, resolved_endpoints=resolved + ) + assert latest_name == snap2 + + def test_incremental_hard_links( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + 
remote_hardlink_volume: RemoteVolume, + ) -> None: + """Unchanged files should be hard-linked between snapshots.""" + src = tmp_path / "src" + src.mkdir() + (src / "unchanged.txt").write_text("same content") + (src / "changed.txt").write_text("v1") + + # First sync + sync, config, resolved, snap1 = _do_sync( + src, ssh_endpoint, remote_hardlink_volume + ) + + time.sleep(0.1) + + # Modify one file, leave the other unchanged + (src / "changed.txt").write_text("v2 is different") + + # Second sync with --link-dest from first + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + snap2 = snapshot_path.rsplit("/", 1)[-1] + result = run_rsync( + sync, + config, + link_dest=f"../{snap1}", + dest_suffix=f"snapshots/{snap2}", + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + # Verify the unchanged file shares inode (hard-linked) + inode1 = ssh_exec( + ssh_endpoint, + f"stat -c %i" + f" {REMOTE_BACKUP_PATH}/snapshots/{snap1}/unchanged.txt", + ) + inode2 = ssh_exec( + ssh_endpoint, + f"stat -c %i" + f" {REMOTE_BACKUP_PATH}/snapshots/{snap2}/unchanged.txt", + ) + assert inode1.stdout.strip() == inode2.stdout.strip() + + # Verify the changed file has different content + check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/snapshots/{snap2}/changed.txt", + ) + assert check.stdout.strip() == "v2 is different" + + def test_dry_run_no_symlink_update( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("dry run") + + sync, config, resolved = _make_hl_config( + str(src), remote_hardlink_volume, ssh_endpoint + ) + snapshot_path = create_snapshot_dir( + sync, config, resolved_endpoints=resolved + ) + snapshot_name = snapshot_path.rsplit("/", 1)[-1] + + # Dry-run rsync + result = run_rsync( + sync, + config, + dry_run=True, + dest_suffix=f"snapshots/{snapshot_name}", + resolved_endpoints=resolved, + 
) + assert result.returncode == 0 + + # Symlink should not exist (never updated) + latest = read_latest_symlink(sync, config, resolved_endpoints=resolved) + assert latest is None + + +class TestHardLinkOrphanCleanup: + def test_orphan_cleaned_up( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("orphan test") + + # First sync (complete) + sync, config, resolved, snap1 = _do_sync( + src, ssh_endpoint, remote_hardlink_volume + ) + + time.sleep(0.1) + + # Simulate a failed sync: create a snapshot dir newer than + # latest but don't update the symlink + ssh_exec( + ssh_endpoint, + ( + "mkdir -p" + f" {REMOTE_BACKUP_PATH}/snapshots/9999-99-99T00:00:00.000Z" + ), + ) + + # Verify orphan exists + check = ssh_exec( + ssh_endpoint, + ( + "test -d" + f" {REMOTE_BACKUP_PATH}/snapshots/9999-99-99T00:00:00.000Z" + ), + ) + assert check.returncode == 0 + + # Cleanup should remove it + deleted = cleanup_orphaned_snapshots( + sync, config, resolved_endpoints=resolved + ) + assert len(deleted) == 1 + assert "9999-99-99T00:00:00.000Z" in deleted[0] + + # Verify orphan is gone + check = ssh_exec( + ssh_endpoint, + ( + "test -d" + f" {REMOTE_BACKUP_PATH}/snapshots/9999-99-99T00:00:00.000Z" + ), + check=False, + ) + assert check.returncode != 0 + + # Verify the real snapshot is still there + check = ssh_exec( + ssh_endpoint, + f"test -d {REMOTE_BACKUP_PATH}/snapshots/{snap1}", + ) + assert check.returncode == 0 + + def test_no_cleanup_without_latest( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + + sync, config, resolved = _make_hl_config( + str(src), remote_hardlink_volume, ssh_endpoint + ) + + # No latest symlink -> no cleanup possible + deleted = cleanup_orphaned_snapshots( + sync, config, resolved_endpoints=resolved + ) + assert deleted == [] + + +class 
TestHardLinkPrune: + def _create_snapshots( + self, + src: Path, + ssh_endpoint: SshEndpoint, + remote_hl_volume: RemoteVolume, + count: int, + max_snapshots: int | None = None, + ) -> tuple[SyncConfig, Config, ResolvedEndpoints, list[str]]: + """Create multiple snapshots with distinct timestamps.""" + names: list[str] = [] + sync: SyncConfig | None = None + config: Config | None = None + resolved: ResolvedEndpoints | None = None + + for _ in range(count): + sync, config, resolved, snap_name = _do_sync( + src, + ssh_endpoint, + remote_hl_volume, + max_snapshots=max_snapshots, + ) + names.append(snap_name) + time.sleep(0.1) # distinct timestamps + + assert sync is not None + assert config is not None + assert resolved is not None + return sync, config, resolved, names + + def test_prune_deletes_oldest( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("prune test") + + sync, config, resolved, names = self._create_snapshots( + src, ssh_endpoint, remote_hardlink_volume, 3 + ) + + # Prune to keep only 1 + deleted = prune_snapshots(sync, config, 1, resolved_endpoints=resolved) + assert len(deleted) == 2 + + # Verify only the latest snapshot remains + remaining = list_snapshots(sync, config, resolved) + assert len(remaining) == 1 + assert names[-1] in remaining[0] + + def test_prune_never_deletes_latest( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("never delete latest") + + sync, config, resolved, names = self._create_snapshots( + src, ssh_endpoint, remote_hardlink_volume, 2 + ) + + # latest points to names[-1]; prune to 0 should still + # keep it + deleted = prune_snapshots(sync, config, 0, resolved_endpoints=resolved) + + # names[0] should be deleted, names[-1] (latest) kept + assert len(deleted) == 1 + assert 
names[0] in deleted[0] + + remaining = list_snapshots(sync, config, resolved) + assert len(remaining) == 1 + assert names[-1] in remaining[0] + + def test_prune_dry_run_keeps_all( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("dry run prune") + + sync, config, resolved, names = self._create_snapshots( + src, ssh_endpoint, remote_hardlink_volume, 3 + ) + + # Dry-run prune to 1 + deleted = prune_snapshots( + sync, config, 1, dry_run=True, resolved_endpoints=resolved + ) + assert len(deleted) == 2 + + # All 3 snapshots still exist + remaining = list_snapshots(sync, config, resolved) + assert len(remaining) == 3 + + def test_prune_noop_under_limit( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_hardlink_volume: RemoteVolume, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / "data.txt").write_text("noop prune") + + sync, config, resolved, _ = self._create_snapshots( + src, ssh_endpoint, remote_hardlink_volume, 2 + ) + + # Prune with limit higher than count + deleted = prune_snapshots( + sync, config, 10, resolved_endpoints=resolved + ) + assert deleted == [] + + remaining = list_snapshots(sync, config, resolved) + assert len(remaining) == 2 diff --git a/tests/integration/test_local_to_local.py b/tests/integration/test_local_to_local.py new file mode 100644 index 0000000..0894d6d --- /dev/null +++ b/tests/integration/test_local_to_local.py @@ -0,0 +1,137 @@ +"""Integration tests: local-to-local sync (no Docker needed).""" + +from __future__ import annotations + +from pathlib import Path + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + SyncConfig, + SyncEndpoint, +) +from nbkp.sync.rsync import run_rsync + + +def _make_local_config( + src_path: str, + dst_path: str, + src_subdir: str | None = None, + dst_subdir: str | None = None, + btrfs_snapshots: 
BtrfsSnapshotConfig | None = None, +) -> tuple[SyncConfig, Config]: + src_vol = LocalVolume(slug="src", path=src_path) + dst_vol = LocalVolume(slug="dst", path=dst_path) + destination = DestinationSyncEndpoint( + volume="dst", + subdir=dst_subdir, + ) + if btrfs_snapshots is not None: + destination = DestinationSyncEndpoint( + volume="dst", + subdir=dst_subdir, + btrfs_snapshots=btrfs_snapshots, + ) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src", subdir=src_subdir), + destination=destination, + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"test-sync": sync}, + ) + return sync, config + + +class TestLocalToLocal: + def test_basic_sync(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + (src / "file1.txt").write_text("hello") + (src / "file2.txt").write_text("world") + + sync, config = _make_local_config(str(src), str(dst)) + result = run_rsync(sync, config) + + assert result.returncode == 0 + assert (dst / "file1.txt").read_text() == "hello" + assert (dst / "file2.txt").read_text() == "world" + + def test_incremental_sync(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + (src / "file1.txt").write_text("version-one") + + sync, config = _make_local_config(str(src), str(dst)) + run_rsync(sync, config) + assert (dst / "file1.txt").read_text() == "version-one" + + # Modify (different size) and re-sync + (src / "file1.txt").write_text("version-two-updated") + result = run_rsync(sync, config) + + assert result.returncode == 0 + assert (dst / "file1.txt").read_text() == "version-two-updated" + + def test_delete_propagation(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + (src / "keep.txt").write_text("keep") + (src / "remove.txt").write_text("remove") + + sync, config = _make_local_config(str(src), str(dst)) + run_rsync(sync, 
config) + assert (dst / "remove.txt").exists() + + # Delete from source and re-sync (--delete is in rsync args) + (src / "remove.txt").unlink() + result = run_rsync(sync, config) + + assert result.returncode == 0 + assert (dst / "keep.txt").exists() + assert not (dst / "remove.txt").exists() + + def test_dry_run_no_copy(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + (src / "file.txt").write_text("data") + + sync, config = _make_local_config(str(src), str(dst)) + result = run_rsync(sync, config, dry_run=True) + + assert result.returncode == 0 + assert not (dst / "file.txt").exists() + + def test_subdir(self, tmp_path: Path) -> None: + src = tmp_path / "src" / "photos" + dst = tmp_path / "dst" / "photos-backup" + src.mkdir(parents=True) + dst.mkdir(parents=True) + + (src / "img.jpg").write_text("jpeg-data") + + sync, config = _make_local_config( + str(tmp_path / "src"), + str(tmp_path / "dst"), + src_subdir="photos", + dst_subdir="photos-backup", + ) + result = run_rsync(sync, config) + + assert result.returncode == 0 + assert (dst / "img.jpg").read_text() == "jpeg-data" diff --git a/tests/integration/test_local_to_remote.py b/tests/integration/test_local_to_remote.py new file mode 100644 index 0000000..34ff7a1 --- /dev/null +++ b/tests/integration/test_local_to_remote.py @@ -0,0 +1,116 @@ +"""Integration tests: local-to-remote sync (Docker).""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BACKUP_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +class TestLocalToRemote: + def test_sync_to_container( + self, + tmp_path: Path, + 
ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + # Create local source files + src_dir = tmp_path / "src" + src_dir.mkdir() + (src_dir / "hello.txt").write_text("hello from local") + + src_vol = LocalVolume(slug="src", path=str(src_dir)) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": src_vol, "dst": remote_volume}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + # Verify file arrived on container + check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/hello.txt", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "hello from local" + + def test_sync_with_subdir( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + # Create local source with subdir + src_dir = tmp_path / "src" / "photos" + src_dir.mkdir(parents=True) + (src_dir / "img.jpg").write_text("image-data") + + src_vol = LocalVolume(slug="src", path=str(tmp_path / "src")) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="dst", subdir="photos-backup" + ), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": src_vol, "dst": remote_volume}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + 
check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/photos-backup/img.jpg", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "image-data" diff --git a/tests/integration/test_proxy_jump.py b/tests/integration/test_proxy_jump.py new file mode 100644 index 0000000..fd5a6eb --- /dev/null +++ b/tests/integration/test_proxy_jump.py @@ -0,0 +1,80 @@ +"""Integration tests: proxy jump (bastion) support.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BACKUP_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +class TestProxyJump: + def test_sync_through_bastion( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + bastion_container: SshEndpoint, + proxied_ssh_endpoint: SshEndpoint, + ) -> None: + # Create local source files + src_dir = tmp_path / "src" + src_dir.mkdir() + (src_dir / "hello.txt").write_text("via bastion") + + src_vol = LocalVolume(slug="src", path=str(src_dir)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="proxied-server", + path=REMOTE_BACKUP_PATH, + ) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={ + "bastion": bastion_container, + "proxied-server": proxied_ssh_endpoint, + }, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 
+ + # Verify file arrived via direct connection + check = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/hello.txt", + ) + assert check.returncode == 0 + assert check.stdout.strip() == "via bastion" diff --git a/tests/integration/test_remote_to_local.py b/tests/integration/test_remote_to_local.py new file mode 100644 index 0000000..a7e1b8a --- /dev/null +++ b/tests/integration/test_remote_to_local.py @@ -0,0 +1,123 @@ +"""Integration tests: remote-to-local sync (Docker).""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BACKUP_PATH +from nbkp.testkit.gen.fs import create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +class TestRemoteToLocal: + def test_sync_from_container( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + # Create test file on remote source + ssh_exec( + ssh_endpoint, + "echo 'hello from remote'" + f" > {REMOTE_BACKUP_PATH}/remote-file.txt", + ) + + dst_dir = tmp_path / "dst" + dst_dir.mkdir() + + dst_vol = LocalVolume(slug="dst", path=str(dst_dir)) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": remote_volume, "dst": dst_vol}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + # Verify file arrived locally + local_file = dst_dir / 
"remote-file.txt" + assert local_file.exists() + assert local_file.read_text().strip() == "hello from remote" + + def test_sync_with_subdir( + self, + tmp_path: Path, + ssh_endpoint: SshEndpoint, + remote_volume: RemoteVolume, + ) -> None: + # Create test file in a subdir on remote source + ssh_exec( + ssh_endpoint, + f"mkdir -p {REMOTE_BACKUP_PATH}/photos", + ) + ssh_exec( + ssh_endpoint, + "echo 'image-data'" f" > {REMOTE_BACKUP_PATH}/photos/img.jpg", + ) + + dst_dir = tmp_path / "dst" + dst_dir.mkdir() + + dst_vol = LocalVolume(slug="dst", path=str(dst_dir)) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="dst", subdir="photos-backup" + ), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": remote_volume, "dst": dst_vol}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + local_file = dst_dir / "photos-backup" / "img.jpg" + assert local_file.exists() + assert local_file.read_text().strip() == "image-data" diff --git a/tests/integration/test_remote_to_remote_same_server.py b/tests/integration/test_remote_to_remote_same_server.py new file mode 100644 index 0000000..f858545 --- /dev/null +++ b/tests/integration/test_remote_to_remote_same_server.py @@ -0,0 +1,75 @@ +"""Integration tests: remote-to-remote sync, same server (Docker).""" + +from __future__ import annotations + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import run_rsync +from nbkp.testkit.docker import REMOTE_BACKUP_PATH +from nbkp.testkit.gen.fs import 
create_seed_sentinels + +from .conftest import ssh_exec + +pytestmark = pytest.mark.integration + + +class TestRemoteToRemoteSameServer: + def test_sync_on_container( + self, + ssh_endpoint: SshEndpoint, + ) -> None: + src_vol = RemoteVolume( + slug="src-remote", + ssh_endpoint="test-server", + path=f"{REMOTE_BACKUP_PATH}/src", + ) + dst_vol = RemoteVolume( + slug="dst-remote", + ssh_endpoint="test-server", + path=f"{REMOTE_BACKUP_PATH}/dst", + ) + sync = SyncConfig( + slug="test-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"test-server": ssh_endpoint}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"test-sync": sync}, + ) + + def _run_remote(cmd: str) -> None: + ssh_exec(ssh_endpoint, cmd) + + create_seed_sentinels(config, remote_exec=_run_remote) + + # Create test file on remote source + ssh_exec( + ssh_endpoint, + "echo 'hello from remote'" + f" > {REMOTE_BACKUP_PATH}/src/remote-file.txt", + ) + + resolved = resolve_all_endpoints(config) + result = run_rsync( + sync, + config, + resolved_endpoints=resolved, + ) + assert result.returncode == 0 + + out = ssh_exec( + ssh_endpoint, + f"cat {REMOTE_BACKUP_PATH}/dst/remote-file.txt", + ) + assert out.stdout.strip() == "hello from remote" diff --git a/tests/remote/__init__.py b/tests/remote/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/remote/test_resolution.py b/tests/remote/test_resolution.py new file mode 100644 index 0000000..3dd6727 --- /dev/null +++ b/tests/remote/test_resolution.py @@ -0,0 +1,155 @@ +"""Tests for nbkp.remote.resolution.""" + +from __future__ import annotations + +import socket + +import paramiko +import pytest + +from nbkp.remote.resolution import ( + is_private_host, + resolve_host, + resolve_hostname, +) + + +class TestResolveHostname: + """Tests for resolve_hostname (SSH config lookup).""" + + def test_from_ssh_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + 
ssh_config = paramiko.SSHConfig.from_text( + "Host mynas\n HostName 192.168.1.100\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + assert resolve_hostname("mynas") == "192.168.1.100" + + def test_no_ssh_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: None, + ) + assert resolve_hostname("mynas") == "mynas" + + def test_not_in_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + ssh_config = paramiko.SSHConfig.from_text( + "Host other\n HostName 10.0.0.1\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + assert resolve_hostname("mynas") == "mynas" + + def test_with_port_and_user(self, monkeypatch: pytest.MonkeyPatch) -> None: + ssh_config = paramiko.SSHConfig.from_text( + "Host mynas\n" + " HostName 192.168.1.100\n" + " Port 2222\n" + " User backup\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + # resolve_hostname only returns the hostname + assert resolve_hostname("mynas") == "192.168.1.100" + + +class TestResolveHost: + """Tests for resolve_host (SSH config + DNS).""" + + def test_via_ssh_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + ssh_config = paramiko.SSHConfig.from_text( + "Host mynas\n HostName 127.0.0.1\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + addrs = resolve_host("mynas") + assert addrs is not None + assert "127.0.0.1" in addrs + + def test_unresolvable(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: None, + ) + monkeypatch.setattr( + "nbkp.remote.resolution.socket.getaddrinfo", + _raise_gaierror, + ) + assert resolve_host("nonexistent.invalid") is None + + def test_direct_hostname(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + 
"nbkp.remote.resolution._load_ssh_config", + lambda: None, + ) + addrs = resolve_host("localhost") + assert addrs is not None + assert len(addrs) > 0 + + +class TestIsPrivateHost: + """Tests for is_private_host (SSH config + DNS + IP).""" + + def test_private_via_ssh_config( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + ssh_config = paramiko.SSHConfig.from_text( + "Host mynas\n HostName 192.168.1.100\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + monkeypatch.setattr( + "nbkp.remote.resolution.socket.getaddrinfo", + lambda host, port: [ + (None, None, None, None, ("192.168.1.100", 0)) + ], + ) + assert is_private_host("mynas") is True + + def test_public_via_ssh_config( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + ssh_config = paramiko.SSHConfig.from_text( + "Host mypublic\n HostName 8.8.8.8\n" + ) + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: ssh_config, + ) + monkeypatch.setattr( + "nbkp.remote.resolution.socket.getaddrinfo", + lambda host, port: [(None, None, None, None, ("8.8.8.8", 0))], + ) + assert is_private_host("mypublic") is False + + def test_unresolvable(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: None, + ) + monkeypatch.setattr( + "nbkp.remote.resolution.socket.getaddrinfo", + _raise_gaierror, + ) + assert is_private_host("nonexistent.invalid") is None + + def test_localhost(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + "nbkp.remote.resolution._load_ssh_config", + lambda: None, + ) + assert is_private_host("localhost") is True + + +def _raise_gaierror(*args: object, **kwargs: object) -> None: + raise socket.gaierror("mocked DNS failure") diff --git a/tests/remote/test_ssh.py b/tests/remote/test_ssh.py new file mode 100644 index 0000000..75c9905 --- /dev/null +++ b/tests/remote/test_ssh.py @@ -0,0 +1,473 @@ +"""Tests for nbkp.ssh.""" + +from 
__future__ import annotations + +from unittest.mock import MagicMock, patch + +from nbkp.config import SshEndpoint, SshConnectionOptions +from nbkp.remote import ( + build_ssh_base_args, + build_ssh_e_option, + format_remote_path, + run_remote_command, +) + +_DEFAULT_O_OPTIONS = [ + "-o", + "ConnectTimeout=10", + "-o", + "BatchMode=yes", +] + + +class TestBuildSshBaseArgs: + def test_minimal(self, ssh_endpoint_minimal: SshEndpoint) -> None: + args = build_ssh_base_args(ssh_endpoint_minimal) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "nas2.example.com", + ] + + def test_full(self, ssh_endpoint: SshEndpoint) -> None: + args = build_ssh_base_args(ssh_endpoint) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-p", + "5022", + "-i", + "~/.ssh/key", + "backup@nas.example.com", + ] + + def test_with_ssh_options(self) -> None: + server = SshEndpoint( + slug="host-server", + host="host.example.com", + connection_options=SshConnectionOptions( + strict_host_key_checking=False, + known_hosts_file="/dev/null", + ), + ) + args = build_ssh_base_args(server) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + "StrictHostKeyChecking=no", + "-o", + "UserKnownHostsFile=/dev/null", + "host.example.com", + ] + + def test_custom_connect_timeout(self) -> None: + server = SshEndpoint( + slug="slow-server", + host="slow.example.com", + connection_options=SshConnectionOptions(connect_timeout=30), + ) + args = build_ssh_base_args(server) + assert args == [ + "ssh", + "-o", + "ConnectTimeout=30", + "-o", + "BatchMode=yes", + "slow.example.com", + ] + + def test_compress(self) -> None: + server = SshEndpoint( + slug="compressed", + host="host.example.com", + connection_options=SshConnectionOptions(compress=True), + ) + args = build_ssh_base_args(server) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + "Compression=yes", + "host.example.com", + ] + + def test_forward_agent(self) -> None: + server = SshEndpoint( + slug="forwarded", + host="host.example.com", + 
connection_options=SshConnectionOptions(forward_agent=True), + ) + args = build_ssh_base_args(server) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + "ForwardAgent=yes", + "host.example.com", + ] + + def test_server_alive_interval(self) -> None: + server = SshEndpoint( + slug="keepalive", + host="host.example.com", + connection_options=SshConnectionOptions( + server_alive_interval=60, + ), + ) + args = build_ssh_base_args(server) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + "ServerAliveInterval=60", + "host.example.com", + ] + + def test_proxy_jump_with_user_and_port(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy = SshEndpoint( + slug="bastion", + host="bastion.example.com", + port=2222, + user="admin", + ) + args = build_ssh_base_args(server, [proxy]) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 2222" + " -W %h:%p admin@bastion.example.com" + ) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + f"ProxyCommand={proxy_cmd}", + "target.example.com", + ] + + def test_proxy_jump_default_port(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy = SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + args = build_ssh_base_args(server, [proxy]) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -W %h:%p admin@bastion.example.com" + ) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + f"ProxyCommand={proxy_cmd}", + "target.example.com", + ] + + def test_proxy_jump_no_user(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy = SshEndpoint( + slug="bastion", + host="bastion.example.com", + port=2222, + ) + args = build_ssh_base_args(server, [proxy]) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 2222" + " -W %h:%p bastion.example.com" + ) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + 
f"ProxyCommand={proxy_cmd}", + "target.example.com", + ] + + def test_proxy_chain_multi_hop(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy1 = SshEndpoint( + slug="bastion1", + host="bastion1.example.com", + user="user1", + ) + proxy2 = SshEndpoint( + slug="bastion2", + host="bastion2.example.com", + port=2222, + ) + args = build_ssh_base_args(server, [proxy1, proxy2]) + inner = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -W %%h:%%p user1@bastion1.example.com" + ) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + f" -o ProxyCommand={inner}" + " -p 2222" + " -W %h:%p bastion2.example.com" + ) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "-o", + f"ProxyCommand={proxy_cmd}", + "target.example.com", + ] + + def test_proxy_chain_empty(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + args = build_ssh_base_args(server, []) + assert args == [ + "ssh", + *_DEFAULT_O_OPTIONS, + "target.example.com", + ] + + +_DEFAULT_E_PREFIX = "ssh -o ConnectTimeout=10 -o BatchMode=yes" + + +class TestBuildSshEOption: + def test_minimal(self, ssh_endpoint_minimal: SshEndpoint) -> None: + result = build_ssh_e_option(ssh_endpoint_minimal) + assert result == ["-e", _DEFAULT_E_PREFIX] + + def test_full(self, ssh_endpoint: SshEndpoint) -> None: + result = build_ssh_e_option(ssh_endpoint) + assert result == [ + "-e", + f"{_DEFAULT_E_PREFIX} -p 5022 -i ~/.ssh/key", + ] + + def test_with_ssh_options(self) -> None: + server = SshEndpoint( + slug="host-server", + host="host.example.com", + connection_options=SshConnectionOptions( + strict_host_key_checking=False, + ), + ) + result = build_ssh_e_option(server) + assert result == [ + "-e", + f"{_DEFAULT_E_PREFIX}" " -o StrictHostKeyChecking=no", + ] + + def test_proxy_jump(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy = SshEndpoint( + slug="bastion", + host="bastion.example.com", + 
port=2222, + user="admin", + ) + result = build_ssh_e_option(server, [proxy]) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 2222" + " -W %h:%p admin@bastion.example.com" + ) + import shlex + + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + assert result == [ + "-e", + f"{_DEFAULT_E_PREFIX} -o {quoted}", + ] + + def test_proxy_chain_multi_hop(self) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy1 = SshEndpoint( + slug="bastion1", + host="bastion1.example.com", + user="user1", + ) + proxy2 = SshEndpoint( + slug="bastion2", + host="bastion2.example.com", + port=2222, + ) + result = build_ssh_e_option(server, [proxy1, proxy2]) + inner = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -W %%h:%%p user1@bastion1.example.com" + ) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + f" -o ProxyCommand={inner}" + " -p 2222" + " -W %h:%p bastion2.example.com" + ) + import shlex + + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + assert result == [ + "-e", + f"{_DEFAULT_E_PREFIX} -o {quoted}", + ] + + +class TestFormatRemotePath: + def test_with_user(self, ssh_endpoint: SshEndpoint) -> None: + result = format_remote_path(ssh_endpoint, "/data") + assert result == "backup@nas.example.com:/data" + + def test_without_user(self, ssh_endpoint_minimal: SshEndpoint) -> None: + result = format_remote_path(ssh_endpoint_minimal, "/data") + assert result == "nas2.example.com:/data" + + +class TestRunRemoteCommand: + @patch("nbkp.remote.fabricssh.paramiko") + @patch("nbkp.remote.fabricssh.Connection") + def test_run_remote_command( + self, + mock_conn_cls: MagicMock, + mock_paramiko: MagicMock, + ssh_endpoint: SshEndpoint, + ) -> None: + mock_conn = mock_conn_cls.return_value.__enter__.return_value + mock_result = MagicMock(exited=0, stdout="file1\nfile2\n", stderr="") + mock_conn.run.return_value = mock_result + + result = run_remote_command(ssh_endpoint, ["ls", "/tmp"]) + + 
mock_conn_cls.assert_called_once_with( + host="nas.example.com", + port=5022, + user="backup", + connect_kwargs={ + "allow_agent": True, + "look_for_keys": True, + "compress": False, + "key_filename": "~/.ssh/key", + }, + connect_timeout=10, + forward_agent=False, + gateway=None, + ) + mock_conn.run.assert_called_once_with( + "ls /tmp", + warn=True, + hide=True, + in_stream=False, + ) + assert result.returncode == 0 + assert result.stdout == "file1\nfile2\n" + assert result.stderr == "" + + @patch("nbkp.remote.fabricssh.paramiko") + @patch("nbkp.remote.fabricssh.Connection") + def test_channel_timeout_and_disabled_algorithms( + self, + mock_conn_cls: MagicMock, + mock_paramiko: MagicMock, + ) -> None: + server = SshEndpoint( + slug="advanced", + host="host.example.com", + connection_options=SshConnectionOptions( + channel_timeout=30.0, + disabled_algorithms={ + "ciphers": ["aes128-cbc"], + }, + ), + ) + mock_conn = mock_conn_cls.return_value.__enter__.return_value + mock_result = MagicMock(exited=0, stdout="ok\n", stderr="") + mock_conn.run.return_value = mock_result + + run_remote_command(server, ["echo", "ok"]) + + mock_conn_cls.assert_called_once_with( + host="host.example.com", + port=22, + user=None, + connect_kwargs={ + "allow_agent": True, + "look_for_keys": True, + "compress": False, + "channel_timeout": 30.0, + "disabled_algorithms": { + "ciphers": ["aes128-cbc"], + }, + }, + connect_timeout=10, + forward_agent=False, + gateway=None, + ) + + @patch("nbkp.remote.fabricssh.paramiko") + @patch("nbkp.remote.fabricssh.Connection") + def test_server_alive_interval_calls_set_keepalive( + self, + mock_conn_cls: MagicMock, + mock_paramiko: MagicMock, + ) -> None: + server = SshEndpoint( + slug="keepalive", + host="host.example.com", + connection_options=SshConnectionOptions( + server_alive_interval=60, + ), + ) + mock_conn = mock_conn_cls.return_value.__enter__.return_value + mock_result = MagicMock(exited=0, stdout="ok\n", stderr="") + mock_conn.run.return_value = 
mock_result + + run_remote_command(server, ["echo", "ok"]) + + mock_conn.transport.set_keepalive.assert_called_once_with(60) + + @patch("nbkp.remote.fabricssh.paramiko") + @patch("nbkp.remote.fabricssh.Connection") + def test_proxy_server_creates_gateway( + self, + mock_conn_cls: MagicMock, + mock_paramiko: MagicMock, + ) -> None: + server = SshEndpoint( + slug="target", + host="target.example.com", + ) + proxy = SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + mock_conn = mock_conn_cls.return_value.__enter__.return_value + mock_result = MagicMock(exited=0, stdout="ok\n", stderr="") + mock_conn.run.return_value = mock_result + + run_remote_command(server, ["echo", "ok"], [proxy]) + + # Connection is called twice: once for proxy, once + # for target + assert mock_conn_cls.call_count == 2 + target_call = mock_conn_cls.call_args_list[1] + assert target_call.kwargs["gateway"] is not None diff --git a/tests/sync/__init__.py b/tests/sync/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/sync/test_btrfs.py b/tests/sync/test_btrfs.py new file mode 100644 index 0000000..1a85616 --- /dev/null +++ b/tests/sync/test_btrfs.py @@ -0,0 +1,503 @@ +"""Tests for nbkp.btrfs.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, call, patch + +import pytest + +from nbkp.sync.btrfs import ( + create_snapshot, + delete_snapshot, + get_latest_snapshot, + list_snapshots, + prune_snapshots, +) +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) + + +def _local_config() -> tuple[Config, SyncConfig]: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + 
btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return config, sync + + +def _remote_config() -> tuple[Config, SyncConfig]: + src = LocalVolume(slug="src", path="/mnt/src") + dst_server = SshEndpoint( + slug="nas-server", + host="nas.local", + user="admin", + ) + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="data", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"nas-server": dst_server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return config, sync + + +class TestCreateSnapshotLocal: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, sync = _local_config() + from datetime import datetime, timezone + + fixed_now = datetime(2024, 1, 15, 12, 0, 0, 0, tzinfo=timezone.utc) + path = create_snapshot(sync, config, now=fixed_now) + assert path == ("/mnt/dst/backup/snapshots/2024-01-15T12:00:00.000Z") + mock_run.assert_called_once() + call_args = mock_run.call_args[0][0] + assert call_args == [ + "btrfs", + "subvolume", + "snapshot", + "-r", + "/mnt/dst/backup/latest", + "/mnt/dst/backup/snapshots/2024-01-15T12:00:00.000Z", + ] + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_failure(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=1, stderr="permission denied" + ) + config, sync = _local_config() + from datetime import datetime, timezone + + fixed_now = datetime(2024, 1, 15, 12, 0, 0, 0, tzinfo=timezone.utc) + with pytest.raises(RuntimeError, match="btrfs snapshot"): + create_snapshot(sync, config, now=fixed_now) + + +class TestCreateSnapshotRemote: + 
@patch("nbkp.sync.btrfs.run_remote_command") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, sync = _remote_config() + resolved = resolve_all_endpoints(config) + from datetime import datetime, timezone + + fixed_now = datetime(2024, 1, 15, 12, 0, 0, 0, tzinfo=timezone.utc) + path = create_snapshot( + sync, config, now=fixed_now, resolved_endpoints=resolved + ) + assert path == ("/backup/data/snapshots/2024-01-15T12:00:00.000Z") + mock_run.assert_called_once() + call_args = mock_run.call_args + assert call_args[0][0] == config.ssh_endpoints["nas-server"] + assert call_args[0][1] == [ + "btrfs", + "subvolume", + "snapshot", + "-r", + "/backup/data/latest", + "/backup/data/snapshots/2024-01-15T12:00:00.000Z", + ] + + +class TestGetLatestSnapshotLocal: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240115T120000Z\n", + ) + config, sync = _local_config() + + result = get_latest_snapshot(sync, config) + assert result == ("/mnt/dst/backup/snapshots/20240115T120000Z") + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_empty(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="") + config, sync = _local_config() + + result = get_latest_snapshot(sync, config) + assert result is None + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_dir_missing(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=2, stdout="") + config, sync = _local_config() + + result = get_latest_snapshot(sync, config) + assert result is None + + +class TestGetLatestSnapshotRemote: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240115T120000Z\n", + ) + config, sync = _remote_config() + 
resolved = resolve_all_endpoints(config) + + result = get_latest_snapshot(sync, config, resolved) + assert result == ("/backup/data/snapshots/20240115T120000Z") + mock_run.assert_called_once_with( + config.ssh_endpoints["nas-server"], + ["ls", "/backup/data/snapshots"], + [], + ) + + +def _local_config_spaces() -> tuple[Config, SyncConfig]: + src = LocalVolume(slug="src", path="/mnt/my src") + dst = LocalVolume(slug="dst", path="/mnt/my dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="my backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return config, sync + + +def _remote_config_spaces() -> tuple[Config, SyncConfig]: + src = LocalVolume(slug="src", path="/mnt/my src") + dst_server = SshEndpoint( + slug="nas-server", + host="nas.local", + user="admin", + ) + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas-server", + path="/my backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="my data", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"nas-server": dst_server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return config, sync + + +class TestCreateSnapshotLocalSpaces: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, sync = _local_config_spaces() + from datetime import datetime, timezone + + fixed_now = datetime(2024, 1, 15, 12, 0, 0, 0, tzinfo=timezone.utc) + path = create_snapshot(sync, config, now=fixed_now) + assert path == ( + "/mnt/my dst/my backup/snapshots/" "2024-01-15T12:00:00.000Z" + ) + call_args = mock_run.call_args[0][0] + assert call_args == [ + "btrfs", + "subvolume", + 
"snapshot", + "-r", + "/mnt/my dst/my backup/latest", + "/mnt/my dst/my backup/snapshots/" "2024-01-15T12:00:00.000Z", + ] + + +class TestCreateSnapshotRemoteSpaces: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, sync = _remote_config_spaces() + resolved = resolve_all_endpoints(config) + from datetime import datetime, timezone + + fixed_now = datetime(2024, 1, 15, 12, 0, 0, 0, tzinfo=timezone.utc) + path = create_snapshot( + sync, config, now=fixed_now, resolved_endpoints=resolved + ) + assert path == ( + "/my backup/my data/snapshots/" "2024-01-15T12:00:00.000Z" + ) + call_args = mock_run.call_args + assert call_args[0][1] == [ + "btrfs", + "subvolume", + "snapshot", + "-r", + "/my backup/my data/latest", + "/my backup/my data/snapshots/" "2024-01-15T12:00:00.000Z", + ] + + +class TestGetLatestSnapshotRemoteSpaces: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240115T120000Z\n", + ) + config, sync = _remote_config_spaces() + resolved = resolve_all_endpoints(config) + + result = get_latest_snapshot(sync, config, resolved) + assert result == ("/my backup/my data/snapshots/20240115T120000Z") + mock_run.assert_called_once_with( + config.ssh_endpoints["nas-server"], + ["ls", "/my backup/my data/snapshots"], + [], + ) + + +class TestListSnapshotsLocal: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240115T120000Z\n", + ) + config, sync = _local_config() + + result = list_snapshots(sync, config) + assert result == [ + "/mnt/dst/backup/snapshots/20240101T000000Z", + "/mnt/dst/backup/snapshots/20240115T120000Z", + ] + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_empty(self, 
mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="") + config, sync = _local_config() + + result = list_snapshots(sync, config) + assert result == [] + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_dir_missing(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=2, stdout="") + config, sync = _local_config() + + result = list_snapshots(sync, config) + assert result == [] + + +class TestListSnapshotsRemote: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240115T120000Z\n", + ) + config, sync = _remote_config() + resolved = resolve_all_endpoints(config) + + result = list_snapshots(sync, config, resolved) + assert result == [ + "/backup/data/snapshots/20240101T000000Z", + "/backup/data/snapshots/20240115T120000Z", + ] + + +class TestDeleteSnapshotLocal: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, _ = _local_config() + dst_vol = config.volumes["dst"] + path = "/mnt/dst/backup/snapshots/20240101T000000Z" + + delete_snapshot(path, dst_vol, {}) + assert mock_run.call_count == 2 + mock_run.assert_has_calls( + [ + call( + ["btrfs", "property", "set", path, "ro", "false"], + capture_output=True, + text=True, + ), + call( + ["btrfs", "subvolume", "delete", path], + capture_output=True, + text=True, + ), + ] + ) + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_failure_on_property_set(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=1, stderr="permission denied" + ) + config, _ = _local_config() + dst_vol = config.volumes["dst"] + + with pytest.raises(RuntimeError, match="btrfs property set ro=false"): + delete_snapshot( + "/mnt/dst/backup/snapshots/20240101T000000Z", + dst_vol, + {}, + ) + + 
@patch("nbkp.sync.btrfs.subprocess.run") + def test_failure_on_delete(self, mock_run: MagicMock) -> None: + mock_run.side_effect = [ + MagicMock(returncode=0, stderr=""), + MagicMock(returncode=1, stderr="permission denied"), + ] + config, _ = _local_config() + dst_vol = config.volumes["dst"] + + with pytest.raises(RuntimeError, match="btrfs delete"): + delete_snapshot( + "/mnt/dst/backup/snapshots/20240101T000000Z", + dst_vol, + {}, + ) + + +class TestDeleteSnapshotRemote: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_success(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + config, _ = _remote_config() + resolved = resolve_all_endpoints(config) + dst_vol = config.volumes["dst"] + path = "/backup/data/snapshots/20240101T000000Z" + server = config.ssh_endpoints["nas-server"] + + delete_snapshot(path, dst_vol, resolved) + assert mock_run.call_count == 2 + mock_run.assert_has_calls( + [ + call( + server, + ["btrfs", "property", "set", path, "ro", "false"], + [], + ), + call( + server, + ["btrfs", "subvolume", "delete", path], + [], + ), + ] + ) + + +class TestPruneSnapshotsLocal: + @patch("nbkp.sync.btrfs.subprocess.run") + def test_prunes_oldest(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240102T000000Z\n20240103T000000Z\n", + stderr="", + ) + config, sync = _local_config() + + deleted = prune_snapshots(sync, config, max_snapshots=1) + assert deleted == [ + "/mnt/dst/backup/snapshots/20240101T000000Z", + "/mnt/dst/backup/snapshots/20240102T000000Z", + ] + # ls call + 2 × (property set + delete) calls + assert mock_run.call_count == 5 + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_nothing_to_prune(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240102T000000Z\n", + stderr="", + ) + config, sync = _local_config() + + deleted = prune_snapshots(sync, config, 
max_snapshots=5) + assert deleted == [] + # Only the ls call + assert mock_run.call_count == 1 + + @patch("nbkp.sync.btrfs.subprocess.run") + def test_dry_run(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240102T000000Z\n20240103T000000Z\n", + stderr="", + ) + config, sync = _local_config() + + deleted = prune_snapshots(sync, config, max_snapshots=1, dry_run=True) + assert deleted == [ + "/mnt/dst/backup/snapshots/20240101T000000Z", + "/mnt/dst/backup/snapshots/20240102T000000Z", + ] + # Only the ls call, no delete calls + assert mock_run.call_count == 1 + + +class TestPruneSnapshotsRemote: + @patch("nbkp.sync.btrfs.run_remote_command") + def test_prunes_oldest(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="20240101T000000Z\n20240102T000000Z\n20240103T000000Z\n", + stderr="", + ) + config, sync = _remote_config() + resolved = resolve_all_endpoints(config) + + deleted = prune_snapshots( + sync, config, max_snapshots=2, resolved_endpoints=resolved + ) + assert deleted == [ + "/backup/data/snapshots/20240101T000000Z", + ] + # ls call + 1 × (property set + delete) calls + assert mock_run.call_count == 3 diff --git a/tests/sync/test_hardlinks.py b/tests/sync/test_hardlinks.py new file mode 100644 index 0000000..b1fa423 --- /dev/null +++ b/tests/sync/test_hardlinks.py @@ -0,0 +1,456 @@ +"""Tests for nbkp.sync.hardlinks.""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from nbkp.config import ( + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + ResolvedEndpoint, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from nbkp.sync.hardlinks import ( + cleanup_orphaned_snapshots, + create_snapshot_dir, + delete_snapshot, + prune_snapshots, + read_latest_symlink, + update_latest_symlink, +) 
+ +_NOW = datetime(2026, 2, 21, 12, 0, 0, tzinfo=timezone.utc) +_TS = "2026-02-21T12:00:00.000Z" + + +def _local_config() -> tuple[SyncConfig, Config]: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=5 + ), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return sync, config + + +def _remote_config() -> tuple[SyncConfig, Config, dict[str, ResolvedEndpoint]]: + server = SshEndpoint(slug="nas", host="nas.local", user="backup") + src = LocalVolume(slug="src", path="/src") + dst = RemoteVolume(slug="dst", ssh_endpoint="nas", path="/backup") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=3 + ), + ), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + re = {"dst": ResolvedEndpoint(server=server)} + return sync, config, re + + +class TestCreateSnapshotDir: + @patch("nbkp.sync.hardlinks.subprocess.run") + def test_local(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stderr="") + sync, config = _local_config() + + path = create_snapshot_dir(sync, config, now=_NOW) + + assert path == f"/dst/snapshots/{_TS}" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["mkdir", "-p", f"/dst/snapshots/{_TS}"] + + @patch("nbkp.sync.hardlinks.run_remote_command") + def test_remote(self, mock_remote: MagicMock) -> None: + mock_remote.return_value = MagicMock(returncode=0, stderr="") + sync, config, re = _remote_config() + + path = create_snapshot_dir( + sync, config, now=_NOW, resolved_endpoints=re + ) + + assert path == 
f"/backup/snapshots/{_TS}" + mock_remote.assert_called_once() + + @patch("nbkp.sync.hardlinks.subprocess.run") + def test_failure_raises(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=1, stderr="permission denied" + ) + sync, config = _local_config() + + with pytest.raises(RuntimeError, match="mkdir"): + create_snapshot_dir(sync, config, now=_NOW) + + +class TestReadLatestSymlink: + def test_local_exists(self, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + latest = tmp_path / "latest" + latest.symlink_to("snapshots/2026-02-21T12:00:00.000Z") + + result = read_latest_symlink(sync, config) + assert result == "2026-02-21T12:00:00.000Z" + + def test_local_missing(self, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + result = read_latest_symlink(sync, config) + assert result is None + + @patch("nbkp.sync.hardlinks.run_remote_command") + def test_remote_exists(self, mock_remote: MagicMock) -> None: + mock_remote.return_value = MagicMock( + returncode=0, + stdout="snapshots/2026-02-21T12:00:00.000Z\n", + ) + sync, config, re = _remote_config() + + result = read_latest_symlink(sync, config, resolved_endpoints=re) + assert result == "2026-02-21T12:00:00.000Z" + + @patch("nbkp.sync.hardlinks.run_remote_command") + def test_remote_missing(self, 
mock_remote: MagicMock) -> None: + mock_remote.return_value = MagicMock(returncode=1, stdout="") + sync, config, re = _remote_config() + + result = read_latest_symlink(sync, config, resolved_endpoints=re) + assert result is None + + +class TestUpdateLatestSymlink: + def test_local(self, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + update_latest_symlink(sync, config, _TS) + link = tmp_path / "latest" + assert link.is_symlink() + assert str(link.readlink()) == f"snapshots/{_TS}" + + @patch("nbkp.sync.hardlinks.run_remote_command") + def test_remote(self, mock_remote: MagicMock) -> None: + mock_remote.return_value = MagicMock(returncode=0, stderr="") + sync, config, re = _remote_config() + + update_latest_symlink(sync, config, _TS, resolved_endpoints=re) + mock_remote.assert_called_once() + cmd = mock_remote.call_args[0][1] + assert "ln" in cmd + assert f"snapshots/{_TS}" in cmd + + +class TestCleanupOrphanedSnapshots: + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_deletes_orphans( + self, mock_list: MagicMock, tmp_path: Path + ) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + # Create snapshots dir and symlink + snaps = tmp_path / "snapshots" + snaps.mkdir() + (snaps / "T1").mkdir() + (snaps / "T2").mkdir() # orphan + latest = tmp_path / "latest" + 
latest.symlink_to("snapshots/T1") + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + f"{tmp_path}/snapshots/T2", + ] + + deleted = cleanup_orphaned_snapshots(sync, config) + assert len(deleted) == 1 + assert "T2" in deleted[0] + assert not (snaps / "T2").exists() + + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_no_orphans(self, mock_list: MagicMock, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + latest = tmp_path / "latest" + latest.symlink_to("snapshots/T2") + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + f"{tmp_path}/snapshots/T2", + ] + + deleted = cleanup_orphaned_snapshots(sync, config) + assert deleted == [] + + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_no_latest_symlink( + self, mock_list: MagicMock, tmp_path: Path + ) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + deleted = cleanup_orphaned_snapshots(sync, config) + assert deleted == [] + mock_list.assert_not_called() + + +class TestDeleteSnapshot: + def test_local(self, tmp_path: Path) -> None: + snap = tmp_path / "snap1" + snap.mkdir() + (snap / "file.txt").write_text("data") + vol = LocalVolume(slug="dst", path=str(tmp_path)) + + delete_snapshot(str(snap), vol, {}) + assert not snap.exists() + + @patch("nbkp.sync.hardlinks.run_remote_command") + def test_remote(self, 
mock_remote: MagicMock) -> None: + mock_remote.return_value = MagicMock(returncode=0, stderr="") + server = SshEndpoint(slug="nas", host="nas.local", user="backup") + vol = RemoteVolume(slug="dst", ssh_endpoint="nas", path="/backup") + re = {"dst": ResolvedEndpoint(server=server)} + + delete_snapshot("/backup/snapshots/T1", vol, re) + mock_remote.assert_called_once() + cmd = mock_remote.call_args[0][1] + assert cmd == ["rm", "-rf", "/backup/snapshots/T1"] + + +class TestPruneSnapshots: + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_prune_excess(self, mock_list: MagicMock, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=2 + ), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + # Create snapshots + latest symlink + snaps = tmp_path / "snapshots" + snaps.mkdir() + for name in ["T1", "T2", "T3"]: + (snaps / name).mkdir() + latest = tmp_path / "latest" + latest.symlink_to("snapshots/T3") + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + f"{tmp_path}/snapshots/T2", + f"{tmp_path}/snapshots/T3", + ] + + deleted = prune_snapshots(sync, config, 2) + assert len(deleted) == 1 + assert "T1" in deleted[0] + + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_never_prunes_latest( + self, mock_list: MagicMock, tmp_path: Path + ) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=1 + ), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + snaps = 
tmp_path / "snapshots" + snaps.mkdir() + for name in ["T1", "T2"]: + (snaps / name).mkdir() + latest = tmp_path / "latest" + latest.symlink_to("snapshots/T1") + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + f"{tmp_path}/snapshots/T2", + ] + + # max_snapshots=1, but T1 is latest so only T2 pruned + deleted = prune_snapshots(sync, config, 1) + assert len(deleted) == 1 + assert "T2" in deleted[0] + assert (snaps / "T1").exists() + + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_dry_run(self, mock_list: MagicMock, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + snaps = tmp_path / "snapshots" + snaps.mkdir() + for name in ["T1", "T2", "T3"]: + (snaps / name).mkdir() + latest = tmp_path / "latest" + latest.symlink_to("snapshots/T3") + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + f"{tmp_path}/snapshots/T2", + f"{tmp_path}/snapshots/T3", + ] + + deleted = prune_snapshots(sync, config, 1, dry_run=True) + assert len(deleted) == 2 + # Dry run: directories still exist + assert (snaps / "T1").exists() + assert (snaps / "T2").exists() + + @patch("nbkp.sync.hardlinks.list_snapshots") + def test_no_excess(self, mock_list: MagicMock, tmp_path: Path) -> None: + dst = LocalVolume(slug="dst", path=str(tmp_path)) + src = LocalVolume(slug="src", path="/src") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + mock_list.return_value = [ + f"{tmp_path}/snapshots/T1", + ] + + 
deleted = prune_snapshots(sync, config, 5) + assert deleted == [] diff --git a/tests/sync/test_rsync.py b/tests/sync/test_rsync.py new file mode 100644 index 0000000..ecae2ab --- /dev/null +++ b/tests/sync/test_rsync.py @@ -0,0 +1,1179 @@ +"""Tests for nbkp.rsync.""" + +from __future__ import annotations + +import io +from unittest.mock import MagicMock, patch + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + RsyncOptions, + SshEndpoint, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.sync.rsync import ProgressMode, build_rsync_command, run_rsync + + +class TestBuildRsyncCommandLocalToLocal: + def test_basic(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "/mnt/src/photos/", + "/mnt/dst/backup/", + ] + + def test_dry_run(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config, dry_run=True) + assert "--dry-run" in cmd + + def test_link_dest(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + 
source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command( + sync, + config, + link_dest="../../snapshots/20240101T000000Z", + ) + assert "--link-dest=../../snapshots/20240101T000000Z" in cmd + + +class TestBuildRsyncCommandLocalToRemote: + def test_basic(self) -> None: + nas_server = SshEndpoint( + slug="nas-server", + host="nas.local", + port=5022, + user="backup", + key="~/.ssh/key", + ) + src = LocalVolume(slug="src", path="/mnt/src") + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint(volume="dst", subdir="photos"), + ) + config = Config( + ssh_endpoints={"nas-server": nas_server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "-e", + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 5022 -i ~/.ssh/key", + "/mnt/src/photos/", + "backup@nas.local:/backup/photos/", + ] + + +class TestBuildRsyncCommandRemoteToLocal: + def test_basic(self) -> None: + server = SshEndpoint( + slug="server", + host="server.local", + user="admin", + ) + src = RemoteVolume( + slug="src", + ssh_endpoint="server", + path="/data", + ) + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + ssh_endpoints={"server": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = 
resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "-e", + "ssh -o ConnectTimeout=10 -o BatchMode=yes", + "admin@server.local:/data/", + "/mnt/dst/backup/", + ] + + +class TestBuildRsyncCommandRemoteToRemoteSameServer: + """When both volumes resolve to the same SSH endpoint, + rsync should use local paths (no inner SSH).""" + + def _simple_config( + self, + server: SshEndpoint | None = None, + extra_endpoints: dict[str, SshEndpoint] | None = None, + src_path: str = "/data/src", + dst_path: str = "/data/dst", + src_subdir: str | None = None, + dst_subdir: str | None = None, + **sync_kwargs: object, + ) -> tuple[SyncConfig, Config]: + srv = server or SshEndpoint( + slug="nas", + host="nas.local", + port=5022, + user="backup", + key="~/.ssh/key", + ) + endpoints: dict[str, SshEndpoint] = {"nas": srv} + if extra_endpoints: + endpoints.update(extra_endpoints) + src = RemoteVolume( + slug="src", + ssh_endpoint="nas", + path=src_path, + ) + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas", + path=dst_path, + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir=src_subdir), + destination=DestinationSyncEndpoint( + volume="dst", subdir=dst_subdir + ), + **sync_kwargs, # type: ignore[arg-type] + ) + config = Config( + ssh_endpoints=endpoints, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return sync, config + + def test_basic(self) -> None: + sync, config = self._simple_config( + src_subdir="photos", dst_subdir="backup" + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + + assert cmd[0] == "ssh" + assert "backup@nas.local" in cmd + + inner = cmd[-1] + assert inner.startswith("rsync") + # No SSH transport — both paths 
are local + assert "-e 'ssh" not in inner + assert "backup@nas.local:" not in inner + assert "/data/src/photos/" in inner + assert "/data/dst/backup/" in inner + + def test_dry_run(self) -> None: + sync, config = self._simple_config() + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, config, dry_run=True, resolved_endpoints=resolved + ) + inner = cmd[-1] + assert "--dry-run" in inner + + def test_with_filters(self) -> None: + sync, config = self._simple_config( + filters=["+ *.jpg", "- *.tmp"], + filter_file="/etc/nbkp/filters.rules", + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + inner = cmd[-1] + assert "'--filter=+ *.jpg'" in inner + assert "'--filter=- *.tmp'" in inner + assert "'--filter=merge /etc/nbkp/filters.rules'" in inner + + def test_with_link_dest(self) -> None: + sync, config = self._simple_config() + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, + config, + link_dest="../../snapshots/20240101T000000Z", + resolved_endpoints=resolved, + ) + inner = cmd[-1] + assert "--link-dest=../../snapshots/20240101T000000Z" in inner + + def test_custom_dest_suffix(self) -> None: + sync, config = self._simple_config() + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, + config, + resolved_endpoints=resolved, + dest_suffix="snapshots/T1", + ) + inner = cmd[-1] + assert "/data/dst/snapshots/T1/" in inner + + def test_with_proxy_chain(self) -> None: + bastion = SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + server = SshEndpoint( + slug="nas", + host="nas.internal", + user="backup", + proxy_jump="bastion", + ) + sync, config = self._simple_config( + server=server, + extra_endpoints={"bastion": bastion}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + + assert cmd[0] == "ssh" + assert 
any("ProxyCommand=" in arg for arg in cmd) + assert "backup@nas.internal" in cmd + + inner = cmd[-1] + assert "-e 'ssh" not in inner + assert "/data/src/" in inner + assert "/data/dst/" in inner + + def test_progress(self) -> None: + sync, config = self._simple_config() + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, + config, + progress=ProgressMode.PER_FILE, + resolved_endpoints=resolved, + ) + inner = cmd[-1] + assert "-v" in inner + assert "--progress" in inner + + +class TestBuildRsyncCommandFilters: + def test_inline_filters(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + filters=["+ *.jpg", "- *.tmp"], + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "--filter=+ *.jpg", + "--filter=- *.tmp", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_filter_file(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + filter_file="/etc/nbkp/filters.rules", + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "--filter=merge /etc/nbkp/filters.rules", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_filters_and_filter_file(self) -> None: + src = 
LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + filters=["+ *.jpg"], + filter_file="/etc/nbkp/filters.rules", + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "--filter=+ *.jpg", + "--filter=merge /etc/nbkp/filters.rules", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_no_filters(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + user_filters = [ + a + for a in cmd + if a.startswith("--filter=") + and not a.startswith("--filter=P") + and not a.startswith("--filter=H") + ] + assert user_filters == [] + + +class TestBuildRsyncCommandOptions: + def _simple_config(self) -> tuple[SyncConfig, Config]: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return sync, config + + def test_override_default_options(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions( + 
default_options_override=["-a"], + checksum=False, + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_extra_options(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions( + extra_options=["--bwlimit=1000"], + checksum=False, + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--bwlimit=1000", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_override_and_extra(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions( + default_options_override=["-a", "--delete"], + extra_options=["--bwlimit=1000"], + checksum=False, + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--bwlimit=1000", + "/mnt/src/", + "/mnt/dst/", + ] + + def test_checksum_default(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config) + assert "--checksum" in cmd + + def test_checksum_disabled(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + 
destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions(checksum=False), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + cmd = build_rsync_command(sync, config) + assert "--checksum" not in cmd + + def test_compress_enabled(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions(compress=True), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + cmd = build_rsync_command(sync, config) + assert "--compress" in cmd + assert "--checksum" in cmd + + def test_compress_default(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config) + assert "--compress" not in cmd + + +class TestBuildRsyncCommandProxyJump: + def test_local_to_remote_with_proxy(self) -> None: + bastion = SshEndpoint( + slug="bastion", + host="bastion.example.com", + port=2222, + user="admin", + ) + nas_server = SshEndpoint( + slug="nas-server", + host="nas.local", + port=5022, + user="backup", + key="~/.ssh/key", + proxy_jump="bastion", + ) + src = LocalVolume(slug="src", path="/mnt/src") + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={ + "bastion": bastion, + "nas-server": nas_server, + }, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + import shlex + + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 2222" + " -W %h:%p admin@bastion.example.com" + ) + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + assert cmd == 
[ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "-e", + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 5022 -i ~/.ssh/key" + f" -o {quoted}", + "/mnt/src/", + "backup@nas.local:/backup/", + ] + + def test_remote_to_local_with_proxy(self) -> None: + bastion = SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + server = SshEndpoint( + slug="server", + host="server.internal", + user="backup", + proxy_jump="bastion", + ) + src = RemoteVolume( + slug="src", + ssh_endpoint="server", + path="/data", + ) + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={ + "bastion": bastion, + "server": server, + }, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + import shlex + + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -W %h:%p admin@bastion.example.com" + ) + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "-e", + "ssh -o ConnectTimeout=10 -o BatchMode=yes" f" -o {quoted}", + "backup@server.internal:/data/", + "/mnt/dst/", + ] + + +class TestBuildRsyncCommandMultiHopProxy: + def test_local_to_remote_with_multi_hop_proxy(self) -> None: + bastion1 = SshEndpoint( + slug="bastion1", + host="bastion1.example.com", + user="admin", + ) + bastion2 = SshEndpoint( + slug="bastion2", + host="bastion2.example.com", + port=2222, + ) + nas_server = SshEndpoint( + slug="nas-server", + host="nas.local", + port=5022, + user="backup", + 
key="~/.ssh/key", + proxy_jumps=["bastion1", "bastion2"], + ) + src = LocalVolume(slug="src", path="/mnt/src") + dst = RemoteVolume( + slug="dst", + ssh_endpoint="nas-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={ + "bastion1": bastion1, + "bastion2": bastion2, + "nas-server": nas_server, + }, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + import shlex + + inner = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -W %%h:%%p admin@bastion1.example.com" + ) + proxy_cmd = ( + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + f" -o ProxyCommand={inner}" + " -p 2222" + " -W %h:%p bastion2.example.com" + ) + quoted = shlex.quote(f"ProxyCommand={proxy_cmd}") + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "-e", + "ssh -o ConnectTimeout=10 -o BatchMode=yes" + " -p 5022 -i ~/.ssh/key" + f" -o {quoted}", + "/mnt/src/", + "backup@nas.local:/backup/", + ] + + +class TestBuildRsyncCommandSpacesInPaths: + def test_local_to_local_spaces(self) -> None: + src = LocalVolume(slug="src", path="/mnt/my src") + dst = LocalVolume(slug="dst", path="/mnt/my dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="my photos"), + destination=DestinationSyncEndpoint( + volume="dst", subdir="my backup" + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd == [ + "rsync", + "-a", + "--delete", + "--delete-excluded", + "--partial-dir=.rsync-partial", + "--safe-links", + "--filter=H .nbkp-*", + "--filter=P .nbkp-*", + "--checksum", + "/mnt/my src/my photos/", + 
"/mnt/my dst/my backup/", + ] + + +class TestBuildRsyncCommandProgress: + def _simple_config(self) -> tuple[SyncConfig, Config]: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + return sync, config + + def test_no_progress(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config) + assert "-v" not in cmd + assert "--progress" not in cmd + assert "--info=progress2" not in cmd + assert "--stats" not in cmd + assert "--human-readable" not in cmd + + def test_progress_none(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config, progress=ProgressMode.NONE) + assert "-v" not in cmd + assert "--progress" not in cmd + assert "--info=progress2" not in cmd + + def test_progress_overall(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config, progress=ProgressMode.OVERALL) + assert "--info=progress2" in cmd + assert "--stats" in cmd + assert "--human-readable" in cmd + assert "-v" not in cmd + assert "--progress" not in cmd + + def test_progress_per_file(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config, progress=ProgressMode.PER_FILE) + assert "-v" in cmd + assert "--progress" in cmd + assert "--human-readable" in cmd + assert "--info=progress2" not in cmd + assert "--stats" not in cmd + + def test_progress_full(self) -> None: + sync, config = self._simple_config() + cmd = build_rsync_command(sync, config, progress=ProgressMode.FULL) + assert "-v" in cmd + assert "--progress" in cmd + assert "--info=progress2" in cmd + assert "--stats" in cmd + assert "--human-readable" in cmd + + +class TestSourceSnapshotPath: + """When source has snapshots, rsync should read 
from latest/.""" + + def test_local_to_local_btrfs_source(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint( + volume="src", + subdir="photos", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd[-2] == "/mnt/src/photos/latest/" + assert cmd[-1] == "/mnt/dst/backup/" + + def test_local_to_local_hard_link_source(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint( + volume="src", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd[-2] == "/mnt/src/latest/" + assert cmd[-1] == "/mnt/dst/" + + def test_local_to_remote_btrfs_source(self) -> None: + server = SshEndpoint(slug="nas", host="nas.local", user="backup") + src = LocalVolume(slug="src", path="/mnt/src") + dst = RemoteVolume(slug="dst", ssh_endpoint="nas", path="/backup") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint( + volume="src", + subdir="data", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + assert cmd[-2] == "/mnt/src/data/latest/" + assert cmd[-1] == "backup@nas.local:/backup/" + + def test_remote_to_local_hard_link_source(self) -> None: + server = 
SshEndpoint(slug="srv", host="srv.local", user="admin") + src = RemoteVolume(slug="src", ssh_endpoint="srv", path="/data") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint( + volume="src", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"srv": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + assert cmd[-2] == "admin@srv.local:/data/latest/" + assert cmd[-1] == "/mnt/dst/" + + def test_no_snapshots_source_unchanged(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd[-2] == "/mnt/src/photos/" + assert cmd[-1] == "/mnt/dst/" + + def test_remote_to_remote_same_server_btrfs_source(self) -> None: + server = SshEndpoint( + slug="nas", + host="nas.local", + user="backup", + ) + src = RemoteVolume(slug="src", ssh_endpoint="nas", path="/data/src") + dst = RemoteVolume(slug="dst", ssh_endpoint="nas", path="/data/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint( + volume="src", + subdir="photos", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command(sync, config, resolved_endpoints=resolved) + inner = cmd[-1] + assert "/data/src/photos/latest/" in inner + assert "/data/dst/" in inner 
+ + +class TestDestSuffix: + def test_default_bare(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config) + assert cmd[-1] == "/mnt/dst/" + + def test_default_btrfs(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command(sync, config, dest_suffix="latest") + assert cmd[-1] == "/mnt/dst/latest/" + + def test_custom_suffix(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + cmd = build_rsync_command( + sync, config, dest_suffix="snapshots/2026-02-21T12:00:00.000Z" + ) + assert cmd[-1] == "/mnt/dst/snapshots/2026-02-21T12:00:00.000Z/" + + def test_local_to_remote(self) -> None: + server = SshEndpoint(slug="nas", host="nas.local", user="backup") + src = LocalVolume(slug="src", path="/mnt/src") + dst = RemoteVolume(slug="dst", ssh_endpoint="nas", path="/backup") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, + config, + 
resolved_endpoints=resolved, + dest_suffix="snapshots/T1", + ) + assert cmd[-1] == "backup@nas.local:/backup/snapshots/T1/" + + def test_remote_to_local(self) -> None: + server = SshEndpoint(slug="srv", host="srv.local", user="admin") + src = RemoteVolume(slug="src", ssh_endpoint="srv", path="/data") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + ssh_endpoints={"srv": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + resolved = resolve_all_endpoints(config) + + cmd = build_rsync_command( + sync, + config, + resolved_endpoints=resolved, + dest_suffix="snapshots/T1", + ) + assert cmd[-1] == "/mnt/dst/snapshots/T1/" + + +class TestRunRsync: + @patch("nbkp.sync.rsync.subprocess.run") + def test_run_rsync(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, stdout="done", stderr="" + ) + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + result = run_rsync(sync, config) + assert result.returncode == 0 + mock_run.assert_called_once() + + @patch("nbkp.sync.rsync.subprocess.Popen") + def test_run_rsync_streams_output(self, mock_popen: MagicMock) -> None: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + streamed = "sending incremental file list\r\nfile.txt\n" + proc = MagicMock() + proc.stdout = io.StringIO(streamed) + proc.poll.side_effect = lambda: ( + 0 if 
proc.stdout.tell() == len(streamed) else None + ) + proc.wait.return_value = 0 + mock_popen.return_value = proc + + chunks: list[str] = [] + result = run_rsync(sync, config, on_output=chunks.append) + + assert result.returncode == 0 + assert result.stdout == streamed + assert "".join(chunks) == streamed + mock_popen.assert_called_once() diff --git a/tests/sync/test_runner.py b/tests/sync/test_runner.py new file mode 100644 index 0000000..f2985b0 --- /dev/null +++ b/tests/sync/test_runner.py @@ -0,0 +1,329 @@ +"""Tests for nbkp.runner.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from nbkp.check import ( + SyncReason, + SyncStatus, + VolumeReason, + VolumeStatus, +) +from nbkp.sync import run_all_syncs + + +def _make_local_config() -> Config: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + +def _make_btrfs_config() -> Config: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + +def _make_btrfs_config_with_max() -> Config: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True, max_snapshots=5), + ), 
+ ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + +def _make_remote_same_server_btrfs_config() -> Config: + server = SshEndpoint(slug="server", host="nas.local", user="backup") + src = RemoteVolume( + slug="src", + ssh_endpoint="server", + path="/data", + ) + dst = RemoteVolume( + slug="dst", + ssh_endpoint="server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + return Config( + ssh_endpoints={"server": server}, + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + +def _active_statuses( + config: Config, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + vol_statuses = { + name: VolumeStatus( + slug=name, + config=vol, + reasons=[], + ) + for name, vol in config.volumes.items() + } + sync_statuses = { + name: SyncStatus( + slug=name, + config=sync, + source_status=vol_statuses[sync.source.volume], + destination_status=vol_statuses[sync.destination.volume], + reasons=[], + ) + for name, sync in config.syncs.items() + } + return vol_statuses, sync_statuses + + +def _inactive_statuses( + config: Config, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + vol_statuses = { + name: VolumeStatus( + slug=name, + config=vol, + reasons=[VolumeReason.UNREACHABLE], + ) + for name, vol in config.volumes.items() + } + sync_statuses = { + name: SyncStatus( + slug=name, + config=sync, + source_status=vol_statuses[sync.source.volume], + destination_status=vol_statuses[sync.destination.volume], + reasons=[SyncReason.SOURCE_UNAVAILABLE], + ) + for name, sync in config.syncs.items() + } + return vol_statuses, sync_statuses + + +class TestRunAllSyncs: + @patch("nbkp.sync.runner.run_rsync") + def test_successful_sync(self, mock_rsync: MagicMock) -> None: + config = _make_local_config() + _, sync_statuses = _active_statuses(config) + 
mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + + results = run_all_syncs(config, sync_statuses) + assert len(results) == 1 + assert results[0].success is True + assert results[0].rsync_exit_code == 0 + + def test_inactive_sync(self) -> None: + config = _make_local_config() + _, sync_statuses = _inactive_statuses(config) + + results = run_all_syncs(config, sync_statuses) + assert len(results) == 1 + assert results[0].success is False + assert "not active" in (results[0].error or "") + + @patch("nbkp.sync.runner.run_rsync") + def test_rsync_failure(self, mock_rsync: MagicMock) -> None: + config = _make_local_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=23, stdout="", stderr="error" + ) + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is False + assert results[0].rsync_exit_code == 23 + + @patch("nbkp.sync.runner.run_rsync") + def test_filter_by_sync_slug(self, mock_rsync: MagicMock) -> None: + config = _make_local_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + + results = run_all_syncs( + config, sync_statuses, only_syncs=["nonexistent"] + ) + assert len(results) == 0 + + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_btrfs_snapshot_after_sync( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + ) -> None: + config = _make_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.return_value = "/dst/snapshots/20240115T120000Z" + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is True + assert results[0].snapshot_path == "/dst/snapshots/20240115T120000Z" + mock_snap.assert_called_once() + + @patch("nbkp.sync.runner.run_rsync") + def 
test_btrfs_snapshot_skipped_on_dry_run( + self, + mock_rsync: MagicMock, + ) -> None: + config = _make_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + + results = run_all_syncs(config, sync_statuses, dry_run=True) + assert results[0].success is True + assert results[0].snapshot_path is None + + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_btrfs_no_link_dest( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + ) -> None: + config = _make_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.return_value = "/dst/snapshots/20240115T120000Z" + + run_all_syncs(config, sync_statuses) + + # Btrfs workflow no longer passes --link-dest + call_kwargs = mock_rsync.call_args + assert call_kwargs.kwargs.get("link_dest") is None + + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_remote_same_server_with_btrfs( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + ) -> None: + config = _make_remote_same_server_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.return_value = "/backup/snapshots/20240115T120000Z" + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is True + assert results[0].snapshot_path is not None + mock_snap.assert_called_once() + + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_snapshot_failure( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + ) -> None: + config = _make_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.side_effect = RuntimeError("btrfs 
failed") + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is False + assert "Snapshot failed" in (results[0].error or "") + + @patch("nbkp.sync.runner.btrfs_prune_snapshots") + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_auto_prune_after_snapshot( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + mock_prune: MagicMock, + ) -> None: + config = _make_btrfs_config_with_max() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.return_value = "/dst/snapshots/20240115T120000Z" + mock_prune.return_value = ["/dst/snapshots/old"] + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is True + assert results[0].pruned_paths == ["/dst/snapshots/old"] + mock_prune.assert_called_once() + + @patch("nbkp.sync.runner.btrfs_prune_snapshots") + @patch("nbkp.sync.runner.create_snapshot") + @patch("nbkp.sync.runner.run_rsync") + def test_no_auto_prune_without_max_snapshots( + self, + mock_rsync: MagicMock, + mock_snap: MagicMock, + mock_prune: MagicMock, + ) -> None: + config = _make_btrfs_config() + _, sync_statuses = _active_statuses(config) + mock_rsync.return_value = MagicMock( + returncode=0, stdout="done\n", stderr="" + ) + mock_snap.return_value = "/dst/snapshots/20240115T120000Z" + + results = run_all_syncs(config, sync_statuses) + assert results[0].success is True + assert results[0].pruned_paths is None + mock_prune.assert_not_called() diff --git a/tests/test_backup.py b/tests/test_backup.py deleted file mode 100644 index 8ad6c04..0000000 --- a/tests/test_backup.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Tests for backup functionality. 
-""" - -import pytest -import tempfile -from pathlib import Path -from ssb.backup import BackupManager -from ssb.encryption import EncryptionManager - - -class TestBackupManager: - """Test cases for BackupManager.""" - - def test_init(self): - """Test BackupManager initialization.""" - with tempfile.TemporaryDirectory() as temp_dir: - backup_manager = BackupManager(temp_dir) - assert backup_manager.backup_dir == Path(temp_dir) - assert isinstance( - backup_manager.encryption_manager, EncryptionManager - ) - - def test_create_file_backup(self): - """Test creating a backup of a file.""" - with tempfile.TemporaryDirectory() as temp_dir: - # Create a test file - test_file = Path(temp_dir) / "test.txt" - test_file.write_text("Hello, World!") - - # Create a separate backup directory - backup_dir = Path(temp_dir) / "backups" - backup_manager = BackupManager(str(backup_dir)) - backup_path = backup_manager.create_backup(str(test_file)) - - assert Path(backup_path).exists() - assert Path(backup_path).read_text() == "Hello, World!" 
- - def test_create_directory_backup(self): - """Test creating a backup of a directory.""" - with tempfile.TemporaryDirectory() as temp_dir: - # Create a test directory with files - test_dir = Path(temp_dir) / "test_dir" - test_dir.mkdir() - (test_dir / "file1.txt").write_text("File 1") - (test_dir / "file2.txt").write_text("File 2") - - # Create a separate backup directory - backup_dir = Path(temp_dir) / "backups" - backup_manager = BackupManager(str(backup_dir)) - backup_path = backup_manager.create_backup(str(test_dir)) - - assert Path(backup_path).exists() - assert (Path(backup_path) / "file1.txt").exists() - assert (Path(backup_path) / "file2.txt").exists() - - def test_backup_nonexistent_source(self): - """Test backup with non-existent source.""" - with tempfile.TemporaryDirectory() as temp_dir: - backup_dir = Path(temp_dir) / "backups" - backup_manager = BackupManager(str(backup_dir)) - - with pytest.raises(FileNotFoundError): - backup_manager.create_backup("/nonexistent/path") - - def test_list_backups(self): - """Test listing backups.""" - with tempfile.TemporaryDirectory() as temp_dir: - backup_dir = Path(temp_dir) / "backups" - backup_manager = BackupManager(str(backup_dir)) - - # Create some test files in backup directory - (backup_dir / "backup1").touch() - (backup_dir / "backup2").touch() - - backups = backup_manager.list_backups() - assert "backup1" in backups - assert "backup2" in backups diff --git a/tests/test_check.py b/tests/test_check.py new file mode 100644 index 0000000..c4a8eb7 --- /dev/null +++ b/tests/test_check.py @@ -0,0 +1,3014 @@ +"""Tests for nbkp.check and nbkp.output.""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + ResolvedEndpoint, + ResolvedEndpoints, + RsyncOptions, + SshEndpoint, + SshConnectionOptions, + SyncConfig, + 
SyncEndpoint, +) +from nbkp.output import OutputFormat +from nbkp.sync import SyncResult +from nbkp.check import ( + SyncReason, + SyncStatus, + VolumeReason, + VolumeStatus, + _check_btrfs_filesystem, + _check_btrfs_mount_option, + _check_btrfs_subvolume, + _check_command_available, + check_all_syncs, + check_sync, + check_volume, +) + + +class TestLocalVolume: + def test_construction(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + assert vol.slug == "data" + assert vol.path == "/mnt/data" + + def test_frozen(self) -> None: + import pydantic + + vol = LocalVolume(slug="data", path="/mnt/data") + try: + vol.slug = "other" # type: ignore[misc] + assert False, "Should be frozen" + except (AttributeError, pydantic.ValidationError): + pass + + +class TestSshEndpoint: + def test_construction_defaults(self) -> None: + server = SshEndpoint(slug="nas-server", host="nas.local") + assert server.slug == "nas-server" + assert server.host == "nas.local" + assert server.port == 22 + assert server.user is None + assert server.key is None + assert server.connection_options == SshConnectionOptions() + assert server.connection_options.connect_timeout == 10 + + def test_construction_full(self) -> None: + server = SshEndpoint( + slug="nas-server", + host="nas.local", + port=2222, + user="backup", + key="~/.ssh/id_rsa", + connection_options=SshConnectionOptions(connect_timeout=30), + ) + assert server.port == 2222 + assert server.user == "backup" + assert server.key == "~/.ssh/id_rsa" + assert server.connection_options.connect_timeout == 30 + + def test_construction_with_proxy_jump(self) -> None: + server = SshEndpoint( + slug="target", + host="target.internal", + proxy_jump="bastion", + ) + assert server.proxy_jump == "bastion" + + def test_proxy_jump_defaults_to_none(self) -> None: + server = SshEndpoint(slug="nas-server", host="nas.local") + assert server.proxy_jump is None + + +class TestRemoteVolume: + def test_construction(self) -> None: + vol = RemoteVolume( + 
slug="nas", + ssh_endpoint="nas-server", + path="/backup", + ) + assert vol.slug == "nas" + assert vol.ssh_endpoint == "nas-server" + assert vol.path == "/backup" + + def test_frozen(self) -> None: + import pydantic + + vol = RemoteVolume( + slug="nas", + ssh_endpoint="nas-server", + path="/backup", + ) + try: + vol.path = "other" # type: ignore[misc] + assert False, "Should be frozen" + except (AttributeError, pydantic.ValidationError): + pass + + +class TestSyncEndpoint: + def test_construction_defaults(self) -> None: + ep = SyncEndpoint(volume="data") + assert ep.volume == "data" + assert ep.subdir is None + + def test_construction_with_subdir(self) -> None: + ep = SyncEndpoint(volume="data", subdir="photos") + assert ep.subdir == "photos" + + +class TestSyncConfig: + def test_construction_defaults(self) -> None: + sc = SyncConfig( + slug="sync1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + assert sc.slug == "sync1" + assert sc.enabled is True + assert sc.destination.btrfs_snapshots.enabled is False + assert sc.rsync_options.default_options_override is None + assert sc.rsync_options.extra_options == [] + assert sc.rsync_options.checksum is True + assert sc.rsync_options.compress is False + assert sc.filters == [] + assert sc.filter_file is None + + def test_construction_full(self) -> None: + sc = SyncConfig( + slug="sync1", + source=SyncEndpoint(volume="src", subdir="a"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="b", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + enabled=False, + rsync_options=RsyncOptions( + default_options_override=["-a", "--delete"], + extra_options=["--bwlimit=1000"], + compress=True, + ), + filters=["+ *.jpg", "- *.tmp"], + filter_file="/etc/nbkp/filters.rules", + ) + assert sc.enabled is False + assert sc.destination.btrfs_snapshots.enabled is True + assert sc.rsync_options.default_options_override == [ + "-a", + "--delete", + ] + assert 
sc.rsync_options.extra_options == ["--bwlimit=1000"] + assert sc.rsync_options.compress is True + assert sc.rsync_options.checksum is True + assert sc.filters == ["+ *.jpg", "- *.tmp"] + assert sc.filter_file == "/etc/nbkp/filters.rules" + + +class TestConfig: + def test_empty(self) -> None: + cfg = Config() + assert cfg.volumes == {} + assert cfg.syncs == {} + + def test_with_data(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="data"), + destination=DestinationSyncEndpoint(volume="data"), + ) + cfg = Config( + volumes={"data": vol}, + syncs={"s1": sync}, + ) + assert "data" in cfg.volumes + assert "s1" in cfg.syncs + + +class TestCrossServerValidation: + def test_cross_server_remote_to_remote_rejected(self) -> None: + import pydantic + import pytest + + with pytest.raises(pydantic.ValidationError): + Config( + ssh_endpoints={ + "a": SshEndpoint(slug="a", host="a.com"), + "b": SshEndpoint(slug="b", host="b.com"), + }, + volumes={ + "src": RemoteVolume( + slug="src", + ssh_endpoint="a", + path="/s", + ), + "dst": RemoteVolume( + slug="dst", + ssh_endpoint="b", + path="/d", + ), + }, + syncs={ + "x": SyncConfig( + slug="x", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + }, + ) + + def test_same_server_remote_to_remote_allowed(self) -> None: + config = Config( + ssh_endpoints={ + "server": SshEndpoint(slug="server", host="server.com"), + }, + volumes={ + "src": RemoteVolume( + slug="src", + ssh_endpoint="server", + path="/src", + ), + "dst": RemoteVolume( + slug="dst", + ssh_endpoint="server", + path="/dst", + ), + }, + syncs={ + "x": SyncConfig( + slug="x", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + }, + ) + assert "x" in config.syncs + + +class TestVolumeStatus: + def test_construction_active(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + vs = VolumeStatus( + 
slug="data", + config=vol, + reasons=[], + ) + assert vs.active is True + + def test_construction_inactive(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + vs = VolumeStatus( + slug="data", + config=vol, + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ) + assert vs.active is False + + +class TestSyncStatus: + def test_construction_active(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + vs = VolumeStatus( + slug="data", + config=vol, + reasons=[], + ) + sc = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="data"), + destination=DestinationSyncEndpoint(volume="data"), + ) + ss = SyncStatus( + slug="s1", + config=sc, + source_status=vs, + destination_status=vs, + reasons=[], + ) + assert ss.active is True + + def test_construction_inactive(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + vs = VolumeStatus( + slug="data", + config=vol, + reasons=[], + ) + sc = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="data"), + destination=DestinationSyncEndpoint(volume="data"), + ) + ss = SyncStatus( + slug="s1", + config=sc, + source_status=vs, + destination_status=vs, + reasons=[SyncReason.DISABLED], + ) + assert ss.active is False + + +class TestSyncResult: + def test_construction_defaults(self) -> None: + sr = SyncResult( + sync_slug="s1", + success=True, + dry_run=False, + rsync_exit_code=0, + output="done", + ) + assert sr.snapshot_path is None + assert sr.error is None + + def test_construction_full(self) -> None: + sr = SyncResult( + sync_slug="s1", + success=False, + dry_run=False, + rsync_exit_code=1, + output="", + error="failed", + snapshot_path="/snap/2024", + ) + assert sr.error == "failed" + assert sr.snapshot_path == "/snap/2024" + + +class TestSlugValidation: + def test_valid_simple(self) -> None: + vol = LocalVolume(slug="data", path="/mnt/data") + assert vol.slug == "data" + + def test_valid_kebab_case(self) -> None: + vol = LocalVolume(slug="my-usb-drive", path="/mnt") + assert vol.slug == 
"my-usb-drive" + + def test_valid_with_numbers(self) -> None: + vol = LocalVolume(slug="nas2", path="/mnt") + assert vol.slug == "nas2" + + def test_invalid_uppercase(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="MyDrive", path="/mnt") + + def test_invalid_underscore(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="my_drive", path="/mnt") + + def test_invalid_spaces(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="my drive", path="/mnt") + + def test_invalid_trailing_hyphen(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="drive-", path="/mnt") + + def test_invalid_leading_hyphen(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="-drive", path="/mnt") + + def test_invalid_empty(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="", path="/mnt") + + def test_invalid_too_long(self) -> None: + import pytest + + with pytest.raises(Exception): + LocalVolume(slug="a" * 51, path="/mnt") + + def test_valid_max_length(self) -> None: + vol = LocalVolume(slug="a" * 50, path="/mnt") + assert len(vol.slug) == 50 + + +class TestOutputFormat: + def test_values(self) -> None: + assert OutputFormat.HUMAN.value == "human" + assert OutputFormat.JSON.value == "json" + + +# --- Check function tests (moved from test_checks.py) --- + + +def _remote_config( + vol_name: str = "nas", + server_name: str = "nas-server", + host: str = "nas.local", + path: str = "/backup", +) -> tuple[RemoteVolume, Config]: + server = SshEndpoint(slug=server_name, host=host) + vol = RemoteVolume( + slug=vol_name, + ssh_endpoint=server_name, + path=path, + ) + config = Config( + ssh_endpoints={server_name: server}, + volumes={vol_name: vol}, + ) + return vol, config + + +def _make_resolved(config: Config) -> ResolvedEndpoints: + """Build resolved endpoints from config for testing.""" + result: 
ResolvedEndpoints = {} + for slug, vol in config.volumes.items(): + if isinstance(vol, RemoteVolume): + server = config.ssh_endpoints[vol.ssh_endpoint] + proxy_chain = config.resolve_proxy_chain(server) + result[slug] = ResolvedEndpoint( + server=server, proxy_chain=proxy_chain + ) + return result + + +class TestCheckLocalVolume: + def test_active(self, tmp_path: Path) -> None: + vol = LocalVolume(slug="data", path=str(tmp_path)) + (tmp_path / ".nbkp-vol").touch() + status = check_volume(vol) + assert status.active is True + assert status.reasons == [] + + def test_inactive(self, tmp_path: Path) -> None: + vol = LocalVolume(slug="data", path=str(tmp_path)) + status = check_volume(vol) + assert status.active is False + assert status.reasons == [VolumeReason.SENTINEL_NOT_FOUND] + + +class TestCheckRemoteVolume: + @patch("nbkp.check.run_remote_command") + def test_active(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0) + vol, config = _remote_config() + resolved = _make_resolved(config) + status = check_volume(vol, resolved) + assert status.active is True + assert status.reasons == [] + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, ["test", "-f", "/backup/.nbkp-vol"], [] + ) + + @patch("nbkp.check.run_remote_command") + def test_inactive(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=1) + vol, config = _remote_config() + resolved = _make_resolved(config) + status = check_volume(vol, resolved) + assert status.active is False + assert status.reasons == [VolumeReason.UNREACHABLE] + + +class TestCheckCommandAvailableLocal: + @patch("nbkp.check.shutil.which") + def test_command_found(self, mock_which: MagicMock) -> None: + mock_which.return_value = "/usr/bin/rsync" + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_command_available(vol, "rsync", {}) is True + mock_which.assert_called_once_with("rsync") + + @patch("nbkp.check.shutil.which") + 
def test_command_not_found(self, mock_which: MagicMock) -> None: + mock_which.return_value = None + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_command_available(vol, "rsync", {}) is False + mock_which.assert_called_once_with("rsync") + + +class TestCheckCommandAvailableRemote: + @patch("nbkp.check.run_remote_command") + def test_command_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0) + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_command_available(vol, "rsync", resolved) is True + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with(server, ["which", "rsync"], []) + + @patch("nbkp.check.run_remote_command") + def test_command_not_found(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=1) + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_command_available(vol, "btrfs", resolved) is False + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with(server, ["which", "btrfs"], []) + + +class TestCheckBtrfsFilesystemLocal: + @patch("nbkp.check.subprocess.run") + def test_btrfs(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="btrfs\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_filesystem(vol, {}) is True + mock_run.assert_called_once_with( + ["stat", "-f", "-c", "%T", "/mnt/data"], + capture_output=True, + text=True, + ) + + @patch("nbkp.check.subprocess.run") + def test_not_btrfs(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="ext2/ext3\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_filesystem(vol, {}) is False + + @patch("nbkp.check.subprocess.run") + def test_stat_failure(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=1, stdout="") + vol = LocalVolume(slug="data", 
path="/mnt/data") + assert _check_btrfs_filesystem(vol, {}) is False + + +class TestCheckBtrfsFilesystemRemote: + @patch("nbkp.check.run_remote_command") + def test_btrfs(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="btrfs\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_btrfs_filesystem(vol, resolved) is True + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, + ["stat", "-f", "-c", "%T", "/backup"], + [], + ) + + @patch("nbkp.check.run_remote_command") + def test_not_btrfs(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="ext2/ext3\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_btrfs_filesystem(vol, resolved) is False + + +class TestCheckBtrfsSubvolumeLocal: + @patch("nbkp.check.subprocess.run") + def test_is_subvolume(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="256\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_subvolume(vol, None, {}) is True + mock_run.assert_called_once_with( + ["stat", "-c", "%i", "/mnt/data"], + capture_output=True, + text=True, + ) + + @patch("nbkp.check.subprocess.run") + def test_is_subvolume_with_subdir(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="256\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_subvolume(vol, "backup", {}) is True + mock_run.assert_called_once_with( + ["stat", "-c", "%i", "/mnt/data/backup"], + capture_output=True, + text=True, + ) + + @patch("nbkp.check.subprocess.run") + def test_not_subvolume(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="1234\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_subvolume(vol, None, {}) is False + + @patch("nbkp.check.subprocess.run") + def 
test_stat_failure(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=1, stdout="") + vol = LocalVolume(slug="data", path="/mnt/data") + assert _check_btrfs_subvolume(vol, None, {}) is False + + +class TestCheckBtrfsSubvolumeRemote: + @patch("nbkp.check.run_remote_command") + def test_is_subvolume(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="256\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_btrfs_subvolume(vol, None, resolved) is True + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, + ["stat", "-c", "%i", "/backup"], + [], + ) + + @patch("nbkp.check.run_remote_command") + def test_is_subvolume_with_subdir(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="256\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_btrfs_subvolume(vol, "data", resolved) is True + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, + ["stat", "-c", "%i", "/backup/data"], + [], + ) + + @patch("nbkp.check.run_remote_command") + def test_not_subvolume(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="1234\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert _check_btrfs_subvolume(vol, None, resolved) is False + + +class TestCheckBtrfsMountOptionLocal: + @patch("nbkp.check.subprocess.run") + def test_option_present(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="rw,relatime,user_subvol_rm_allowed\n", + ) + vol = LocalVolume(slug="data", path="/mnt/data") + assert ( + _check_btrfs_mount_option(vol, "user_subvol_rm_allowed", {}) + is True + ) + mock_run.assert_called_once_with( + ["findmnt", "-T", "/mnt/data", "-n", "-o", "OPTIONS"], + capture_output=True, + text=True, + ) + + 
@patch("nbkp.check.subprocess.run") + def test_option_missing(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="rw,relatime\n") + vol = LocalVolume(slug="data", path="/mnt/data") + assert ( + _check_btrfs_mount_option(vol, "user_subvol_rm_allowed", {}) + is False + ) + + @patch("nbkp.check.subprocess.run") + def test_findmnt_failure(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=1, stdout="") + vol = LocalVolume(slug="data", path="/mnt/data") + assert ( + _check_btrfs_mount_option(vol, "user_subvol_rm_allowed", {}) + is False + ) + + +class TestCheckBtrfsMountOptionRemote: + @patch("nbkp.check.run_remote_command") + def test_option_present(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock( + returncode=0, + stdout="rw,relatime,user_subvol_rm_allowed\n", + ) + vol, config = _remote_config() + resolved = _make_resolved(config) + assert ( + _check_btrfs_mount_option(vol, "user_subvol_rm_allowed", resolved) + is True + ) + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, + ["findmnt", "-T", "/backup", "-n", "-o", "OPTIONS"], + [], + ) + + @patch("nbkp.check.run_remote_command") + def test_option_missing(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0, stdout="rw,relatime\n") + vol, config = _remote_config() + resolved = _make_resolved(config) + assert ( + _check_btrfs_mount_option(vol, "user_subvol_rm_allowed", resolved) + is False + ) + + +class TestCheckSync: + def _make_config( + self, tmp_src: Path, tmp_dst: Path + ) -> tuple[Config, SyncConfig]: + src_vol = LocalVolume(slug="src", path=str(tmp_src)) + dst_vol = LocalVolume(slug="dst", path=str(tmp_dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": 
sync}, + ) + return config, sync + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_active_sync(self, mock_which: MagicMock, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + (dst / "backup").mkdir() + (dst / "backup" / ".nbkp-dst").touch() + + config, sync = self._make_config(src, dst) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + status = check_sync(sync, config, vol_statuses) + assert status.active is True + assert status.reasons == [] + + def test_disabled_sync(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + config, _ = self._make_config(src, dst) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + enabled=False, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert status.reasons == [SyncReason.DISABLED] + + def test_source_unavailable(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + config, sync = self._make_config(src, dst) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + status = check_sync(sync, config, vol_statuses) + assert 
status.active is False + assert SyncReason.SOURCE_UNAVAILABLE in status.reasons + + def test_missing_src_sentinel(self, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + (src / "data").mkdir() + (dst / "backup").mkdir() + (dst / "backup" / ".nbkp-dst").touch() + + config, sync = self._make_config(src, dst) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.SOURCE_SENTINEL_NOT_FOUND in status.reasons + + def _setup_active_sentinels(self, src: Path, dst: Path) -> None: + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir(exist_ok=True) + (src / "data" / ".nbkp-src").touch() + (dst / "backup").mkdir(exist_ok=True) + (dst / "backup" / ".nbkp-dst").touch() + + def _make_active_vol_statuses( + self, config: Config + ) -> dict[str, VolumeStatus]: + return { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + @patch("nbkp.check.shutil.which", return_value=None) + def test_rsync_not_found_on_source( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + config, sync = self._make_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.RSYNC_NOT_FOUND_ON_SOURCE in status.reasons + assert SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION in status.reasons + + @patch( + "nbkp.check.shutil.which", + side_effect=lambda cmd: ( + None if cmd == "btrfs" else f"/usr/bin/{cmd}" + ), + ) + 
def test_rsync_found_btrfs_not_found_on_destination( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION in status.reasons + + @patch( + "nbkp.check.shutil.which", + side_effect=lambda cmd: (None if cmd == "stat" else f"/usr/bin/{cmd}"), + ) + def test_stat_not_found_on_destination( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.STAT_NOT_FOUND_ON_DESTINATION in status.reasons + assert SyncReason.DESTINATION_NOT_BTRFS not in status.reasons + assert SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME not in status.reasons + + @patch("nbkp.check.subprocess.run") + 
@patch( + "nbkp.check.shutil.which", + side_effect=lambda cmd: ( + None if cmd == "findmnt" else f"/usr/bin/{cmd}" + ), + ) + def test_findmnt_not_found_on_destination( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + (dst / "backup" / "latest").mkdir() + (dst / "backup" / "snapshots").mkdir() + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="256\n") + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION in status.reasons + assert ( + SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM + not in status.reasons + ) + + @patch( + "nbkp.check.shutil.which", + side_effect=lambda cmd: ( + None if cmd in ("stat", "findmnt") else f"/usr/bin/{cmd}" + ), + ) + def test_stat_and_findmnt_both_missing( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + 
sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.STAT_NOT_FOUND_ON_DESTINATION in status.reasons + assert SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_not_btrfs_filesystem( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + mock_subprocess.return_value = MagicMock( + returncode=0, stdout="ext2/ext3\n" + ) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_NOT_BTRFS in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_not_btrfs_subvolume( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + 
self._setup_active_sentinels(src, dst) + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="1234\n") + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_not_mounted_user_subvol_rm( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + (dst / "backup" / "latest").mkdir() + (dst / "backup" / "snapshots").mkdir() + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: 
list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="256\n") + if cmd[0] == "findmnt": + return MagicMock(returncode=0, stdout="rw,relatime\n") + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert ( + SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM in status.reasons + ) + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_latest_not_found( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + # snapshots exists but latest does not + (dst / "backup" / "snapshots").mkdir() + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="256\n") + if cmd[0] == "findmnt": + return MagicMock( + returncode=0, + stdout="rw,user_subvol_rm_allowed\n", + ) + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, 
vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_LATEST_NOT_FOUND in status.reasons + assert ( + SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND + not in status.reasons + ) + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_snapshots_dir_not_found( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + # latest exists but snapshots does not + (dst / "backup" / "latest").mkdir() + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="256\n") + if cmd[0] == "findmnt": + return MagicMock( + returncode=0, + stdout="rw,user_subvol_rm_allowed\n", + ) + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + assert SyncReason.DESTINATION_LATEST_NOT_FOUND not in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_destination_latest_and_snapshots_both_missing( + self, 
+ mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + # neither latest nor snapshots exist + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._make_active_vol_statuses(config) + + def subprocess_side_effect( + cmd: list[str], **kwargs: object + ) -> MagicMock: + if cmd[:4] == ["stat", "-f", "-c", "%T"]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd[:3] == ["stat", "-c", "%i"]: + return MagicMock(returncode=0, stdout="256\n") + if cmd[0] == "findmnt": + return MagicMock( + returncode=0, + stdout="rw,user_subvol_rm_allowed\n", + ) + return MagicMock(returncode=0) + + mock_subprocess.side_effect = subprocess_side_effect + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_LATEST_NOT_FOUND in status.reasons + assert SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_btrfs_check_skipped_when_not_enabled( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + config, sync = self._make_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is True + assert status.reasons == [] + + @patch("nbkp.check.shutil.which", return_value=None) + def 
test_multiple_failures_accumulated( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + """Source sentinel missing AND rsync missing on both sides.""" + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir() + # No .nbkp-src sentinel + (dst / "backup").mkdir() + (dst / "backup" / ".nbkp-dst").touch() + + config, sync = self._make_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.SOURCE_SENTINEL_NOT_FOUND in status.reasons + assert SyncReason.RSYNC_NOT_FOUND_ON_SOURCE in status.reasons + assert SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION in status.reasons + + def test_both_volumes_unavailable(self, tmp_path: Path) -> None: + """Both source and destination unavailable.""" + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + + config, sync = self._make_config(src, dst) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[VolumeReason.SENTINEL_NOT_FOUND], + ), + } + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.SOURCE_UNAVAILABLE in status.reasons + assert SyncReason.DESTINATION_UNAVAILABLE in status.reasons + + +class TestCheckSyncRemoteCommands: + @patch("nbkp.check.run_remote_command") + def test_rsync_not_found_on_remote_source( + self, mock_run: MagicMock, tmp_path: Path + ) -> None: + dst = tmp_path / "dst" + dst.mkdir() + (dst / ".nbkp-vol").touch() + (dst / "backup").mkdir() + (dst / "backup" / ".nbkp-dst").touch() + + src_server = SshEndpoint(slug="src-server", host="src.local") + src_vol = RemoteVolume( + slug="src", + ssh_endpoint="src-server", + path="/data", + ) + dst_vol = 
LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + ssh_endpoints={"src-server": src_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == ["test", "-f", "/data/data/.nbkp-src"]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=1) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.RSYNC_NOT_FOUND_ON_SOURCE in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_rsync_not_found_on_remote_destination( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + 
config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=1) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.RSYNC_NOT_FOUND_ON_DESTINATION in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_btrfs_not_found_on_remote_destination( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + 
return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=1) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_destination_not_btrfs_on_remote( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, 
stdout="ext2/ext3\n") + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.DESTINATION_NOT_BTRFS in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_destination_not_subvolume_on_remote( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="backup"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd == [ + "stat", + "-c", + "%i", + "/backup/backup", + ]: + return MagicMock(returncode=0, stdout="1234\n") + return 
MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_destination_not_mounted_user_subvol_rm_on_remote( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd == [ + "stat", + "-c", + "%i", + "/backup/backup", + ]: + return MagicMock(returncode=0, stdout="256\n") + if cmd == [ + 
"findmnt", + "-n", + "-o", + "OPTIONS", + "/backup", + ]: + return MagicMock(returncode=0, stdout="rw,relatime\n") + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert ( + SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM in status.reasons + ) + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_stat_not_found_on_remote_destination( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == ["which", "stat"]: + return MagicMock(returncode=1) + return MagicMock(returncode=0) + + mock_run.side_effect = 
remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.STAT_NOT_FOUND_ON_DESTINATION in status.reasons + assert SyncReason.DESTINATION_NOT_BTRFS not in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_findmnt_not_found_on_remote_destination( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == ["which", "findmnt"]: + return MagicMock(returncode=1) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd == [ + "stat", + "-c", + "%i", + "/backup/backup", + ]: 
+ return MagicMock(returncode=0, stdout="256\n") + if cmd == ["test", "-d", "/backup/backup/latest"]: + return MagicMock(returncode=0) + if cmd == ["test", "-d", "/backup/backup/snapshots"]: + return MagicMock(returncode=0) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.FINDMNT_NOT_FOUND_ON_DESTINATION in status.reasons + assert ( + SyncReason.DESTINATION_NOT_MOUNTED_USER_SUBVOL_RM + not in status.reasons + ) + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_destination_latest_not_found_on_remote( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + 
if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd == [ + "stat", + "-c", + "%i", + "/backup/backup", + ]: + return MagicMock(returncode=0, stdout="256\n") + if cmd == [ + "findmnt", + "-n", + "-o", + "OPTIONS", + "/backup", + ]: + return MagicMock( + returncode=0, + stdout="rw,user_subvol_rm_allowed\n", + ) + if cmd == [ + "test", + "-d", + "/backup/backup/latest", + ]: + return MagicMock(returncode=1) + if cmd == [ + "test", + "-d", + "/backup/backup/snapshots", + ]: + return MagicMock(returncode=0) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.DESTINATION_LATEST_NOT_FOUND in status.reasons + assert ( + SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND + not in status.reasons + ) + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_destination_snapshots_dir_not_found_on_remote( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + 
slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "btrfs"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="btrfs\n") + if cmd == [ + "stat", + "-c", + "%i", + "/backup/backup", + ]: + return MagicMock(returncode=0, stdout="256\n") + if cmd == [ + "findmnt", + "-n", + "-o", + "OPTIONS", + "/backup", + ]: + return MagicMock( + returncode=0, + stdout="rw,user_subvol_rm_allowed\n", + ) + if cmd == [ + "test", + "-d", + "/backup/backup/latest", + ]: + return MagicMock(returncode=0) + if cmd == [ + "test", + "-d", + "/backup/backup/snapshots", + ]: + return MagicMock(returncode=1) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + assert SyncReason.DESTINATION_LATEST_NOT_FOUND not in status.reasons + + +class TestCheckAllSyncs: + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_check_all(self, mock_which: MagicMock, tmp_path: Path) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / ".nbkp-src").touch() + (dst / ".nbkp-dst").touch() + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + 
destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + + vol_statuses, sync_statuses = check_all_syncs(config) + assert vol_statuses["src"].active is True + assert vol_statuses["dst"].active is True + assert sync_statuses["s1"].active is True + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_only_syncs_filters_syncs_and_volumes( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + """When only_syncs is given, only those syncs and their + referenced volumes are checked.""" + src1 = tmp_path / "src1" + dst1 = tmp_path / "dst1" + src2 = tmp_path / "src2" + dst2 = tmp_path / "dst2" + for d in (src1, dst1, src2, dst2): + d.mkdir() + (d / ".nbkp-vol").touch() + (src1 / ".nbkp-src").touch() + (dst1 / ".nbkp-dst").touch() + (src2 / ".nbkp-src").touch() + (dst2 / ".nbkp-dst").touch() + + config = Config( + volumes={ + "src1": LocalVolume(slug="src1", path=str(src1)), + "dst1": LocalVolume(slug="dst1", path=str(dst1)), + "src2": LocalVolume(slug="src2", path=str(src2)), + "dst2": LocalVolume(slug="dst2", path=str(dst2)), + }, + syncs={ + "s1": SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src1"), + destination=DestinationSyncEndpoint(volume="dst1"), + ), + "s2": SyncConfig( + slug="s2", + source=SyncEndpoint(volume="src2"), + destination=DestinationSyncEndpoint(volume="dst2"), + ), + }, + ) + + vol_statuses, sync_statuses = check_all_syncs( + config, only_syncs=["s1"] + ) + assert set(sync_statuses.keys()) == {"s1"} + assert set(vol_statuses.keys()) == {"src1", "dst1"} + assert sync_statuses["s1"].active is True + + +class TestCheckHardLinkDest: + def _make_hl_config( + self, tmp_src: Path, tmp_dst: Path + ) -> tuple[Config, SyncConfig]: + src_vol = LocalVolume(slug="src", path=str(tmp_src)) + dst_vol = LocalVolume(slug="dst", path=str(tmp_dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), 
+ destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + return config, sync + + def _setup_active_sentinels(self, src: Path, dst: Path) -> None: + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir(exist_ok=True) + (src / "data" / ".nbkp-src").touch() + (dst / "backup").mkdir(exist_ok=True) + (dst / "backup" / ".nbkp-dst").touch() + + def _make_active_vol_statuses( + self, config: Config + ) -> dict[str, VolumeStatus]: + return { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_snapshots_dir_not_found( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + # No snapshots dir + + config, sync = self._make_hl_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + mock_subprocess.return_value = MagicMock( + returncode=0, stdout="ext2/ext3\n" + ) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_no_hardlink_support_fat( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + (dst / "backup" / "snapshots").mkdir() + + config, sync = 
self._make_hl_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + mock_subprocess.return_value = MagicMock(returncode=0, stdout="vfat\n") + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.DESTINATION_NO_HARDLINK_SUPPORT in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_active_with_ext4( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + (dst / "backup" / "snapshots").mkdir() + + config, sync = self._make_hl_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + mock_subprocess.return_value = MagicMock( + returncode=0, stdout="ext2/ext3\n" + ) + + status = check_sync(sync, config, vol_statuses) + assert status.active is True + assert status.reasons == [] + + @patch( + "nbkp.check.shutil.which", + side_effect=lambda cmd: (None if cmd == "stat" else f"/usr/bin/{cmd}"), + ) + def test_stat_not_found( + self, + mock_which: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + + config, sync = self._make_hl_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert status.active is False + assert SyncReason.STAT_NOT_FOUND_ON_DESTINATION in status.reasons + # No hardlink support check when stat is missing + assert SyncReason.DESTINATION_NO_HARDLINK_SUPPORT not in status.reasons + + @patch("nbkp.check.subprocess.run") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/fake", + ) + def test_no_btrfs_checks_for_hardlink( + self, + mock_which: MagicMock, + mock_subprocess: MagicMock, + tmp_path: Path, + ) -> None: + """Hard-link 
mode should not run any btrfs-specific checks.""" + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_active_sentinels(src, dst) + (dst / "backup" / "snapshots").mkdir() + + config, sync = self._make_hl_config(src, dst) + vol_statuses = self._make_active_vol_statuses(config) + + mock_subprocess.return_value = MagicMock( + returncode=0, stdout="ext2/ext3\n" + ) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.BTRFS_NOT_FOUND_ON_DESTINATION not in status.reasons + assert SyncReason.DESTINATION_NOT_BTRFS not in status.reasons + assert SyncReason.DESTINATION_NOT_BTRFS_SUBVOLUME not in status.reasons + assert SyncReason.DESTINATION_LATEST_NOT_FOUND not in status.reasons + + @patch("nbkp.check.run_remote_command") + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_remote_no_hardlink_support( + self, + mock_which: MagicMock, + mock_run: MagicMock, + tmp_path: Path, + ) -> None: + src = tmp_path / "src" + src.mkdir() + (src / ".nbkp-vol").touch() + (src / "data").mkdir() + (src / "data" / ".nbkp-src").touch() + + dst_server = SshEndpoint(slug="dst-server", host="dst.local") + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = RemoteVolume( + slug="dst", + ssh_endpoint="dst-server", + path="/backup", + ) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint( + volume="dst", + subdir="backup", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + config = Config( + ssh_endpoints={"dst-server": dst_server}, + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = { + "src": VolumeStatus( + slug="src", + config=src_vol, + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=dst_vol, + reasons=[], + ), + } + + def remote_side_effect( + server: SshEndpoint, + cmd: list[str], + proxy_chain: list[SshEndpoint] | None = None, + ) -> MagicMock: + if 
cmd == [ + "test", + "-f", + "/backup/backup/.nbkp-dst", + ]: + return MagicMock(returncode=0) + if cmd == ["which", "rsync"]: + return MagicMock(returncode=0) + if cmd == ["which", "stat"]: + return MagicMock(returncode=0) + if cmd == [ + "stat", + "-f", + "-c", + "%T", + "/backup", + ]: + return MagicMock(returncode=0, stdout="exfat\n") + if cmd == [ + "test", + "-d", + "/backup/backup/snapshots", + ]: + return MagicMock(returncode=0) + return MagicMock(returncode=0) + + mock_run.side_effect = remote_side_effect + + status = check_sync(sync, config, vol_statuses, _make_resolved(config)) + assert status.active is False + assert SyncReason.DESTINATION_NO_HARDLINK_SUPPORT in status.reasons + + +class TestCheckSourceLatest: + """Tests for SOURCE_LATEST_NOT_FOUND check.""" + + def _make_config( + self, + tmp_src: Path, + tmp_dst: Path, + source_snapshot: str = "btrfs", + ) -> tuple[Config, SyncConfig]: + src_vol = LocalVolume(slug="src", path=str(tmp_src)) + dst_vol = LocalVolume(slug="dst", path=str(tmp_dst)) + source = ( + SyncEndpoint( + volume="src", + subdir="data", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ) + if source_snapshot == "btrfs" + else SyncEndpoint( + volume="src", + subdir="data", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ) + ) + sync = SyncConfig( + slug="s1", + source=source, + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + return config, sync + + def _setup_sentinels(self, src: Path, dst: Path) -> None: + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir(exist_ok=True) + (src / "data" / ".nbkp-src").touch() + (dst / "backup").mkdir(exist_ok=True) + (dst / "backup" / ".nbkp-dst").touch() + + def _active_vol_statuses(self, config: Config) -> dict[str, VolumeStatus]: + return { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": 
VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_btrfs_source_latest_missing( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + # No latest/ under source + + config, sync = self._make_config(src, dst, "btrfs") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_LATEST_NOT_FOUND in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_hard_link_source_latest_missing( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + # No latest/ under source + + config, sync = self._make_config(src, dst, "hard-link") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_LATEST_NOT_FOUND in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_btrfs_source_latest_present( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + (src / "data" / "latest").mkdir() + + config, sync = self._make_config(src, dst, "btrfs") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_LATEST_NOT_FOUND not in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_hard_link_source_latest_symlink( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + 
self._setup_sentinels(src, dst) + snap = src / "data" / "snapshots" / "2024-01-01T00:00:00.000Z" + snap.mkdir(parents=True) + (src / "data" / "latest").symlink_to( + "snapshots/2024-01-01T00:00:00.000Z" + ) + + config, sync = self._make_config(src, dst, "hard-link") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_LATEST_NOT_FOUND not in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_no_snapshots_source_skips_check( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + # No latest/ but source has no snapshots + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_LATEST_NOT_FOUND not in status.reasons + + +class TestCheckSourceSnapshots: + """Tests for SOURCE_SNAPSHOTS_DIR_NOT_FOUND check.""" + + def _make_config( + self, + tmp_src: Path, + tmp_dst: Path, + source_snapshot: str = "btrfs", + ) -> tuple[Config, SyncConfig]: + src_vol = LocalVolume(slug="src", path=str(tmp_src)) + dst_vol = LocalVolume(slug="dst", path=str(tmp_dst)) + source = ( + SyncEndpoint( + volume="src", + subdir="data", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ) + if source_snapshot == "btrfs" + else SyncEndpoint( + volume="src", + subdir="data", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ) + ) + sync = SyncConfig( + slug="s1", + source=source, + 
destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + return config, sync + + def _setup_sentinels(self, src: Path, dst: Path) -> None: + (src / ".nbkp-vol").touch() + (dst / ".nbkp-vol").touch() + (src / "data").mkdir(exist_ok=True) + (src / "data" / ".nbkp-src").touch() + (dst / "backup").mkdir(exist_ok=True) + (dst / "backup" / ".nbkp-dst").touch() + + def _active_vol_statuses(self, config: Config) -> dict[str, VolumeStatus]: + return { + "src": VolumeStatus( + slug="src", + config=config.volumes["src"], + reasons=[], + ), + "dst": VolumeStatus( + slug="dst", + config=config.volumes["dst"], + reasons=[], + ), + } + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_btrfs_source_snapshots_missing( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + (src / "data" / "latest").mkdir() + # No snapshots/ dir + + config, sync = self._make_config(src, dst, "btrfs") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_hard_link_source_snapshots_missing( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + (src / "data" / "latest").mkdir() + # No snapshots/ dir + + config, sync = self._make_config(src, dst, "hard-link") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def 
test_btrfs_source_snapshots_present( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + (src / "data" / "latest").mkdir() + (src / "data" / "snapshots").mkdir() + + config, sync = self._make_config(src, dst, "btrfs") + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND not in status.reasons + + @patch( + "nbkp.check.shutil.which", + return_value="/usr/bin/rsync", + ) + def test_no_snapshots_source_skips_check( + self, mock_which: MagicMock, tmp_path: Path + ) -> None: + src = tmp_path / "src" + dst = tmp_path / "dst" + src.mkdir() + dst.mkdir() + self._setup_sentinels(src, dst) + # No snapshots/ but source has no snapshots enabled + + src_vol = LocalVolume(slug="src", path=str(src)) + dst_vol = LocalVolume(slug="dst", path=str(dst)) + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src", subdir="data"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + config = Config( + volumes={"src": src_vol, "dst": dst_vol}, + syncs={"s1": sync}, + ) + vol_statuses = self._active_vol_statuses(config) + + status = check_sync(sync, config, vol_statuses) + assert SyncReason.SOURCE_SNAPSHOTS_DIR_NOT_FOUND not in status.reasons + + +class TestCheckRemoteVolumeSpaces: + @patch("nbkp.check.run_remote_command") + def test_active(self, mock_run: MagicMock) -> None: + mock_run.return_value = MagicMock(returncode=0) + vol, config = _remote_config(path="/my backup") + resolved = _make_resolved(config) + status = check_volume(vol, resolved) + assert status.active is True + server = config.ssh_endpoints["nas-server"] + mock_run.assert_called_once_with( + server, + ["test", "-f", "/my backup/.nbkp-vol"], + [], + ) diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..3856481 --- /dev/null +++ 
b/tests/test_cli.py @@ -0,0 +1,1023 @@ +"""Tests for nbkp.cli.""" + +from __future__ import annotations + +import json +import re +from unittest.mock import MagicMock, patch + +from typer.testing import CliRunner + +from nbkp.cli import app +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + LocalVolume, + RemoteVolume, + SshEndpoint, + SyncConfig, + SyncEndpoint, +) +from nbkp.sync import ProgressMode, SyncResult +from nbkp.check import ( + SyncReason, + SyncStatus, + VolumeReason, + VolumeStatus, +) + +runner = CliRunner() + + +def _strip_panel(text: str) -> str: + """Strip Rich panel border characters and normalize whitespace.""" + text = re.sub(r"[╭╮╰╯│─]", "", text) + return re.sub(r"\s+", " ", text).strip() + + +def _sample_config() -> Config: + src = LocalVolume(slug="local-data", path="/mnt/data") + nas_server = SshEndpoint( + slug="nas-server", + host="nas.example.com", + port=5022, + user="backup", + ) + dst = RemoteVolume( + slug="nas", + ssh_endpoint="nas-server", + path="/volume1/backups", + ) + sync = SyncConfig( + slug="photos-to-nas", + source=SyncEndpoint(volume="local-data", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="nas", subdir="photos-backup" + ), + ) + return Config( + ssh_endpoints={"nas-server": nas_server}, + volumes={"local-data": src, "nas": dst}, + syncs={"photos-to-nas": sync}, + ) + + +def _sample_vol_statuses( + config: Config, +) -> dict[str, VolumeStatus]: + return { + "local-data": VolumeStatus( + slug="local-data", + config=config.volumes["local-data"], + reasons=[], + ), + "nas": VolumeStatus( + slug="nas", + config=config.volumes["nas"], + reasons=[VolumeReason.UNREACHABLE], + ), + } + + +def _sample_sync_statuses( + config: Config, + vol_statuses: dict[str, VolumeStatus], +) -> dict[str, SyncStatus]: + return { + "photos-to-nas": SyncStatus( + slug="photos-to-nas", + config=config.syncs["photos-to-nas"], + source_status=vol_statuses["local-data"], + 
destination_status=vol_statuses["nas"], + reasons=[SyncReason.DESTINATION_UNAVAILABLE], + ), + } + + +def _sample_sentinel_only_sync_statuses( + config: Config, + vol_statuses: dict[str, VolumeStatus], +) -> dict[str, SyncStatus]: + return { + "photos-to-nas": SyncStatus( + slug="photos-to-nas", + config=config.syncs["photos-to-nas"], + source_status=vol_statuses["local-data"], + destination_status=vol_statuses["nas"], + reasons=[ + SyncReason.SOURCE_SENTINEL_NOT_FOUND, + SyncReason.DESTINATION_SENTINEL_NOT_FOUND, + ], + ), + } + + +def _sample_all_active_vol_statuses( + config: Config, +) -> dict[str, VolumeStatus]: + return { + "local-data": VolumeStatus( + slug="local-data", + config=config.volumes["local-data"], + reasons=[], + ), + "nas": VolumeStatus( + slug="nas", + config=config.volumes["nas"], + reasons=[], + ), + } + + +def _sample_all_active_sync_statuses( + config: Config, + vol_statuses: dict[str, VolumeStatus], +) -> dict[str, SyncStatus]: + return { + "photos-to-nas": SyncStatus( + slug="photos-to-nas", + config=config.syncs["photos-to-nas"], + source_status=vol_statuses["local-data"], + destination_status=vol_statuses["nas"], + reasons=[], + ), + } + + +class TestConfigShowCommand: + @patch("nbkp.cli.load_config") + def test_human_output(self, mock_load: MagicMock) -> None: + config = _sample_config() + mock_load.return_value = config + + result = runner.invoke( + app, ["config", "show", "--config", "/fake.yaml"] + ) + assert result.exit_code == 0 + assert "Volumes:" in result.output + assert "Syncs:" in result.output + assert "local-data" in result.output + assert "nas" in result.output + assert "photos-to-nas" in result.output + + @patch("nbkp.cli.load_config") + def test_human_output_shows_servers(self, mock_load: MagicMock) -> None: + config = _sample_config() + mock_load.return_value = config + + result = runner.invoke( + app, ["config", "show", "--config", "/fake.yaml"] + ) + assert result.exit_code == 0 + assert "SSH Endpoints:" in 
result.output + assert "nas-server" in result.output + assert "nas.example.com" in result.output + + @patch("nbkp.cli.load_config") + def test_json_output(self, mock_load: MagicMock) -> None: + config = _sample_config() + mock_load.return_value = config + + result = runner.invoke( + app, + [ + "config", + "show", + "--config", + "/fake.yaml", + "--output", + "json", + ], + ) + assert result.exit_code == 0 + data = json.loads(result.output) + assert "volumes" in data + assert "syncs" in data + assert "ssh-endpoints" in data + + @patch( + "nbkp.cli.load_config", + side_effect=__import__( + "nbkp.config", fromlist=["ConfigError"] + ).ConfigError("bad config"), + ) + def test_config_error(self, mock_load: MagicMock) -> None: + result = runner.invoke( + app, + ["config", "show", "--config", "/bad.yaml"], + ) + assert result.exit_code == 2 + + +class TestCheckCommand: + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_human_output_inactive( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_vol_statuses(config) + sync_s = _sample_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke(app, ["check", "--config", "/fake.yaml"]) + assert result.exit_code == 1 + assert "local-data" in result.output + assert "nas" in result.output + assert "active" in result.output + assert "inactive" in result.output + assert "photos-to-nas" in result.output + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_human_output_all_active( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke(app, ["check", "--config", "/fake.yaml"]) + assert 
result.exit_code == 0 + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_json_output_inactive( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_vol_statuses(config) + sync_s = _sample_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke( + app, + [ + "check", + "--config", + "/fake.yaml", + "--output", + "json", + ], + ) + assert result.exit_code == 1 + data = json.loads(result.output) + assert "volumes" in data + assert "syncs" in data + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_json_output_all_active( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke( + app, + [ + "check", + "--config", + "/fake.yaml", + "--output", + "json", + ], + ) + assert result.exit_code == 0 + data = json.loads(result.output) + assert "volumes" in data + assert "syncs" in data + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_sentinel_only_exit_0_by_default( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_sentinel_only_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke(app, ["check", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_sentinel_only_exit_1_when_strict( + self, mock_load: MagicMock, mock_checks: MagicMock + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = 
_sample_all_active_vol_statuses(config) + sync_s = _sample_sentinel_only_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke( + app, + [ + "check", + "--config", + "/fake.yaml", + "--strict", + ], + ) + assert result.exit_code == 1 + + +class TestRunCommand: + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_successful_run( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="done", + ) + ] + + result = runner.invoke(app, ["run", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + assert "photos-to-nas" in result.output + assert "OK" in result.output + call_kwargs = mock_run.call_args + assert call_kwargs.kwargs.get("on_rsync_output") is None + assert callable(call_kwargs.kwargs.get("on_sync_start")) + assert callable(call_kwargs.kwargs.get("on_sync_end")) + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_displays_status_before_results( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="done", + ) + ] + + result = runner.invoke(app, ["run", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + # Status 
section appears before results section + assert "Volumes:" in result.output + assert "Syncs:" in result.output + vol_pos = result.output.index("Volumes:") + ok_pos = result.output.index("OK") + assert vol_pos < ok_pos + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_failed_run( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=False, + dry_run=False, + rsync_exit_code=23, + output="", + error="rsync failed", + ) + ] + + result = runner.invoke(app, ["run", "--config", "/fake.yaml"]) + assert result.exit_code == 1 + assert "FAILED" in result.output + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_dry_run( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=True, + rsync_exit_code=0, + output="", + ) + ] + + result = runner.invoke( + app, + ["run", "--config", "/fake.yaml", "--dry-run"], + ) + assert result.exit_code == 0 + assert "dry run" in result.output + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_json_output( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = 
_sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="done", + ) + ] + + result = runner.invoke( + app, + [ + "run", + "--config", + "/fake.yaml", + "--output", + "json", + ], + ) + assert result.exit_code == 0 + data = json.loads(result.output) + assert "volumes" in data + assert "syncs" in data + assert "results" in data + assert data["results"][0]["sync_slug"] == "photos-to-nas" + call_kwargs = mock_run.call_args + assert call_kwargs.kwargs.get("on_rsync_output") is None + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_sync_filter( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="", + ) + ] + + result = runner.invoke( + app, + [ + "run", + "--config", + "/fake.yaml", + "--sync", + "photos-to-nas", + ], + ) + assert result.exit_code == 0 + check_kwargs = mock_checks.call_args + assert check_kwargs.kwargs.get("only_syncs") == ["photos-to-nas"] + run_kwargs = mock_run.call_args + assert run_kwargs.kwargs.get("only_syncs") == ["photos-to-nas"] + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_progress( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = 
_sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="", + ) + ] + + result = runner.invoke( + app, + [ + "run", + "--config", + "/fake.yaml", + "--progress", + "per-file", + ], + ) + assert result.exit_code == 0 + call_kwargs = mock_run.call_args + assert call_kwargs.kwargs.get("progress") == ProgressMode.PER_FILE + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_exits_before_syncs_on_status_error( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_vol_statuses(config) + sync_s = _sample_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke(app, ["run", "--config", "/fake.yaml"]) + assert result.exit_code == 1 + mock_run.assert_not_called() + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_sentinel_only_proceeds_by_default( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_sentinel_only_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + mock_run.return_value = [ + SyncResult( + sync_slug="photos-to-nas", + success=True, + dry_run=False, + rsync_exit_code=0, + output="done", + ) + ] + + result = runner.invoke(app, ["run", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + mock_run.assert_called_once() + + @patch("nbkp.cli.run_all_syncs") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_sentinel_only_exits_when_strict( + self, + mock_load: MagicMock, + 
mock_checks: MagicMock, + mock_run: MagicMock, + ) -> None: + config = _sample_config() + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_sentinel_only_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke( + app, + [ + "run", + "--config", + "/fake.yaml", + "--strict", + ], + ) + assert result.exit_code == 1 + mock_run.assert_not_called() + + +def _prune_config() -> Config: + src = LocalVolume(slug="src", path="/src") + dst = LocalVolume(slug="dst", path="/dst") + sync = SyncConfig( + slug="s1", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True, max_snapshots=3), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"s1": sync}, + ) + + +def _prune_active_statuses( + config: Config, +) -> tuple[dict[str, VolumeStatus], dict[str, SyncStatus]]: + vol_statuses = { + name: VolumeStatus(slug=name, config=vol, reasons=[]) + for name, vol in config.volumes.items() + } + sync_statuses = { + name: SyncStatus( + slug=name, + config=sync, + source_status=vol_statuses[sync.source.volume], + destination_status=vol_statuses[sync.destination.volume], + reasons=[], + ) + for name, sync in config.syncs.items() + } + return vol_statuses, sync_statuses + + +class TestPruneCommand: + @patch("nbkp.cli.list_snapshots") + @patch("nbkp.cli.btrfs_prune_snapshots") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_successful_prune( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_prune: MagicMock, + mock_list: MagicMock, + ) -> None: + config = _prune_config() + mock_load.return_value = config + _, sync_s = _prune_active_statuses(config) + mock_checks.return_value = ( + { + name: VolumeStatus( + slug=name, + config=config.volumes[name], + reasons=[], + ) + for name in config.volumes + }, + sync_s, + ) + mock_prune.return_value = 
["/dst/snapshots/old1"] + mock_list.return_value = [ + "/dst/snapshots/s2", + "/dst/snapshots/s3", + ] + + result = runner.invoke(app, ["prune", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + assert "OK" in result.output + mock_prune.assert_called_once() + + @patch("nbkp.cli.list_snapshots") + @patch("nbkp.cli.btrfs_prune_snapshots") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_dry_run( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_prune: MagicMock, + mock_list: MagicMock, + ) -> None: + config = _prune_config() + mock_load.return_value = config + _, sync_s = _prune_active_statuses(config) + mock_checks.return_value = ( + { + name: VolumeStatus( + slug=name, + config=config.volumes[name], + reasons=[], + ) + for name in config.volumes + }, + sync_s, + ) + mock_prune.return_value = ["/dst/snapshots/old1"] + mock_list.return_value = [ + "/dst/snapshots/s1", + "/dst/snapshots/s2", + "/dst/snapshots/s3", + ] + + result = runner.invoke( + app, ["prune", "--config", "/fake.yaml", "--dry-run"] + ) + assert result.exit_code == 0 + assert "dry run" in result.output + mock_prune.assert_called_once() + call_kwargs = mock_prune.call_args + assert call_kwargs.kwargs.get("dry_run") is True + + @patch("nbkp.cli.list_snapshots") + @patch("nbkp.cli.btrfs_prune_snapshots") + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_json_output( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + mock_prune: MagicMock, + mock_list: MagicMock, + ) -> None: + config = _prune_config() + mock_load.return_value = config + _, sync_s = _prune_active_statuses(config) + mock_checks.return_value = ( + { + name: VolumeStatus( + slug=name, + config=config.volumes[name], + reasons=[], + ) + for name in config.volumes + }, + sync_s, + ) + mock_prune.return_value = ["/dst/snapshots/old1"] + mock_list.return_value = [ + "/dst/snapshots/s2", + "/dst/snapshots/s3", + ] + + result = runner.invoke( + app, + 
[ + "prune", + "--config", + "/fake.yaml", + "--output", + "json", + ], + ) + assert result.exit_code == 0 + data = json.loads(result.output) + assert len(data) == 1 + assert data[0]["sync_slug"] == "s1" + assert len(data[0]["deleted"]) == 1 + + @patch("nbkp.cli.check_all_syncs") + @patch("nbkp.cli.load_config") + def test_no_syncs_to_prune( + self, + mock_load: MagicMock, + mock_checks: MagicMock, + ) -> None: + config = _sample_config() # no btrfs snapshots + mock_load.return_value = config + vol_s = _sample_all_active_vol_statuses(config) + sync_s = _sample_all_active_sync_statuses(config, vol_s) + mock_checks.return_value = (vol_s, sync_s) + + result = runner.invoke(app, ["prune", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + + +class TestConfigError: + @patch( + "nbkp.cli.load_config", + side_effect=__import__( + "nbkp.config", fromlist=["ConfigError"] + ).ConfigError("bad config"), + ) + def test_check_config_error(self, mock_load: MagicMock) -> None: + result = runner.invoke(app, ["check", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + + @patch( + "nbkp.cli.load_config", + side_effect=__import__( + "nbkp.config", fromlist=["ConfigError"] + ).ConfigError("bad config"), + ) + def test_run_config_error(self, mock_load: MagicMock) -> None: + result = runner.invoke(app, ["run", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + + def test_plain_error_message(self) -> None: + from nbkp.config import ConfigError + + err = ConfigError("Config file not found: /bad.yaml") + with patch("nbkp.cli.load_config", side_effect=err): + result = runner.invoke(app, ["check", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + out = _strip_panel(result.output) + assert "Config file not found: /bad.yaml" in out + + def test_validation_error_message(self) -> None: + from nbkp.config import ConfigError + from pydantic import ValidationError + from nbkp.config.protocol import Config + + try: + Config.model_validate( + {"volumes": {"v": 
{"type": "ftp", "path": "/x"}}} + ) + except ValidationError as ve: + err = ConfigError(str(ve)) + err.__cause__ = ve + + with patch("nbkp.cli.load_config", side_effect=err): + result = runner.invoke(app, ["check", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + out = _strip_panel(result.output) + assert "volumes → v" in out + assert "does not match any of the expected tags" in out + + def test_yaml_error_message(self) -> None: + import yaml + from nbkp.config import ConfigError + + try: + yaml.safe_load("not_a_list:\n - [invalid") + except yaml.YAMLError as ye: + err = ConfigError(f"Invalid YAML in /bad.yaml: {ye}") + err.__cause__ = ye + + with patch("nbkp.cli.load_config", side_effect=err): + result = runner.invoke(app, ["check", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + out = _strip_panel(result.output) + assert "Invalid YAML" in out + + def test_cross_reference_error_message(self) -> None: + from nbkp.config import ConfigError + from pydantic import ValidationError + from nbkp.config.protocol import Config + + try: + Config.model_validate( + { + "ssh-endpoints": {}, + "volumes": { + "v": { + "type": "remote", + "ssh-endpoint": "missing", + "path": "/x", + }, + }, + "syncs": {}, + } + ) + except ValidationError as ve: + err = ConfigError(str(ve)) + err.__cause__ = ve + + with patch("nbkp.cli.load_config", side_effect=err): + result = runner.invoke(app, ["check", "--config", "/bad.yaml"]) + assert result.exit_code == 2 + out = _strip_panel(result.output) + assert "unknown ssh-endpoint 'missing'" in out + + +class TestShCommand: + @patch("nbkp.cli.load_config") + def test_generates_script(self, mock_load: MagicMock) -> None: + config = _sample_config() + mock_load.return_value = config + + result = runner.invoke(app, ["sh", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + assert "#!/bin/bash" in result.output + assert "set -euo pipefail" in result.output + assert "sync_photos_to_nas()" in result.output + + 
@patch("nbkp.cli.load_config") + def test_config_path_in_header(self, mock_load: MagicMock) -> None: + config = _sample_config() + mock_load.return_value = config + + result = runner.invoke(app, ["sh", "--config", "/fake.yaml"]) + assert result.exit_code == 0 + assert "# Config: /fake.yaml" in result.output + + @patch("nbkp.cli.load_config") + def test_output_file(self, mock_load: MagicMock, tmp_path: object) -> None: + import pathlib + import stat + + tp = pathlib.Path(str(tmp_path)) + config = _sample_config() + mock_load.return_value = config + out = tp / "backup.sh" + + result = runner.invoke( + app, + ["sh", "--config", "/fake.yaml", "-o", str(out)], + ) + assert result.exit_code == 0 + assert out.exists() + content = out.read_text(encoding="utf-8") + assert "#!/bin/bash" in content + assert "sync_photos_to_nas()" in content + mode = out.stat().st_mode + assert mode & stat.S_IXUSR + assert mode & stat.S_IXGRP + + def test_relative_without_output_file(self) -> None: + result = runner.invoke( + app, + ["sh", "--config", "/fake.yaml", "--relative-src"], + ) + assert result.exit_code == 2 + + @patch("nbkp.cli.load_config") + def test_relative_with_output_file( + self, + mock_load: MagicMock, + tmp_path: object, + ) -> None: + import pathlib + + tp = pathlib.Path(str(tmp_path)) + config = _sample_config() + mock_load.return_value = config + out = tp / "backup.sh" + + result = runner.invoke( + app, + [ + "sh", + "--config", + "/fake.yaml", + "-o", + str(out), + "--relative-src", + ], + ) + assert result.exit_code == 0 + assert out.exists() + content = out.read_text(encoding="utf-8") + assert "NBKP_SCRIPT_DIR" in content + + @patch( + "nbkp.cli.load_config", + side_effect=__import__( + "nbkp.config", fromlist=["ConfigError"] + ).ConfigError("bad config"), + ) + def test_config_error(self, mock_load: MagicMock) -> None: + result = runner.invoke(app, ["sh", "--config", "/bad.yaml"]) + assert result.exit_code == 2 diff --git a/tests/test_configloader.py 
b/tests/test_configloader.py new file mode 100644 index 0000000..f55f7dc --- /dev/null +++ b/tests/test_configloader.py @@ -0,0 +1,958 @@ +"""Tests for nbkp.configloader.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest +import yaml + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + ConfigError, + DestinationSyncEndpoint, + EndpointFilter, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + RsyncOptions, + SshEndpoint, + SshConnectionOptions, + SyncConfig, + SyncEndpoint, + find_config_file, + load_config, +) + + +def _config_to_yaml(config: Config) -> str: + return yaml.safe_dump( + config.model_dump(by_alias=True), + default_flow_style=False, + sort_keys=False, + ) + + +class TestFindConfigFile: + def test_explicit_path(self, sample_config_file: Path) -> None: + result = find_config_file(str(sample_config_file)) + assert result == sample_config_file + + def test_explicit_path_missing(self) -> None: + with pytest.raises(ConfigError, match="not found"): + find_config_file("/nonexistent/config.yaml") + + def test_xdg_config( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + xdg = tmp_path / "xdg" + cfg = xdg / "nbkp" / "config.yaml" + cfg.parent.mkdir(parents=True) + cfg.write_text("volumes: {}\nsyncs: {}\n") + monkeypatch.setenv("XDG_CONFIG_HOME", str(xdg)) + result = find_config_file() + assert result == cfg + + def test_no_config_found( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "empty")) + with pytest.raises(ConfigError, match="No config file found"): + find_config_file() + + +class TestLoadConfig: + def test_full_config(self, sample_config_file: Path) -> None: + cfg = load_config(str(sample_config_file)) + assert "nas-server" in cfg.ssh_endpoints + server = cfg.ssh_endpoints["nas-server"] + assert server.slug == "nas-server" + assert server.host == "nas.example.com" + assert server.port == 5022 + assert 
server.user == "backup" + assert server.key == "~/.ssh/key" + assert server.connection_options.connect_timeout == 10 + assert "local-data" in cfg.volumes + assert "nas" in cfg.volumes + assert "photos-to-nas" in cfg.syncs + local = cfg.volumes["local-data"] + assert isinstance(local, LocalVolume) + assert local.path == "/mnt/data" + remote = cfg.volumes["nas"] + assert isinstance(remote, RemoteVolume) + assert remote.ssh_endpoint == "nas-server" + sync = cfg.syncs["photos-to-nas"] + assert sync.source.volume == "local-data" + assert sync.source.subdir == "photos" + assert sync.destination.volume == "nas" + assert sync.destination.subdir == "photos-backup" + assert sync.enabled is True + assert sync.destination.btrfs_snapshots.enabled is False + assert sync.rsync_options.default_options_override is None + assert sync.rsync_options.extra_options == [] + assert sync.rsync_options.checksum is True + assert sync.rsync_options.compress is False + assert sync.filters == ["+ *.jpg", "- *.tmp"] + assert sync.filter_file == "~/.config/nbkp/filters/photos.rules" + + def test_minimal_config(self, sample_minimal_config_file: Path) -> None: + cfg = load_config(str(sample_minimal_config_file)) + sync = cfg.syncs["s1"] + assert sync.enabled is True + assert sync.destination.btrfs_snapshots.enabled is False + assert sync.source.subdir is None + assert sync.rsync_options.default_options_override is None + assert sync.rsync_options.extra_options == [] + assert sync.filters == [] + assert sync.filter_file is None + + def test_invalid_yaml(self, tmp_path: Path) -> None: + p = tmp_path / "bad.yaml" + p.write_text("not_a_list:\n - [invalid") + with pytest.raises(ConfigError, match="Invalid YAML"): + load_config(str(p)) + + def test_not_a_mapping(self, tmp_path: Path) -> None: + p = tmp_path / "list.yaml" + p.write_text("- item1\n- item2\n") + with pytest.raises(ConfigError, match="must be a YAML mapping"): + load_config(str(p)) + + def test_invalid_volume_type(self, tmp_path: Path) -> 
None: + p = tmp_path / "bad_type.yaml" + p.write_text( + "volumes:\n v:\n type: ftp\n path: /x\n" "syncs: {}\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "does not match any of the expected tags" in str(cause) + + def test_missing_local_path(self, tmp_path: Path) -> None: + p = tmp_path / "no_path.yaml" + p.write_text("volumes:\n v:\n type: local\nsyncs: {}\n") + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + errors = cause.errors() + assert any( + err["loc"] == ("volumes", "v", "local", "path") + and err["type"] == "missing" + for err in errors + ) + + def test_missing_remote_host(self, tmp_path: Path) -> None: + p = tmp_path / "no_host.yaml" + p.write_text( + "ssh-endpoints:\n s:\n port: 22\n" + "volumes:\n v:\n type: remote\n" + " ssh-endpoint: s\n" + " path: /x\n" + "syncs: {}\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + errors = cause.errors() + assert any( + "host" in str(err["loc"]) and err["type"] == "missing" + for err in errors + ) + + def test_unknown_ssh_endpoint_reference(self, tmp_path: Path) -> None: + p = tmp_path / "bad_server_ref.yaml" + p.write_text( + "ssh-endpoints: {}\n" + "volumes:\n v:\n type: remote\n" + " ssh-endpoint: missing\n" + " path: /x\n" + "syncs: {}\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "unknown ssh-endpoint 'missing'" in str(cause) + + def test_unknown_volume_reference(self, tmp_path: Path) -> None: + p = tmp_path / "bad_ref.yaml" + p.write_text( + "volumes:\n v:\n type: local\n path: /x\n" + "syncs:\n s:\n source:\n volume: v\n" + " destination:\n volume: missing\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = 
excinfo.value.__cause__ + assert cause is not None + assert "unknown destination volume" in str(cause) + + def test_missing_source_volume(self, tmp_path: Path) -> None: + p = tmp_path / "no_src_vol.yaml" + p.write_text( + "volumes:\n v:\n type: local\n path: /x\n" + "syncs:\n s:\n source:\n volume: missing\n" + " destination:\n volume: v\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "unknown source volume" in str(cause) + + def test_sync_missing_source(self, tmp_path: Path) -> None: + p = tmp_path / "no_src.yaml" + p.write_text( + "volumes:\n v:\n type: local\n path: /x\n" + "syncs:\n s:\n destination:\n volume: v\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + errors = cause.errors() + assert any( + err["loc"] == ("syncs", "s", "source") and err["type"] == "missing" + for err in errors + ) + + def test_filter_normalization(self, tmp_path: Path) -> None: + p = tmp_path / "filters.yaml" + p.write_text( + "volumes:\n" + " v:\n type: local\n path: /x\n" + "syncs:\n" + " s:\n" + " source:\n volume: v\n" + " destination:\n volume: v\n" + " filters:\n" + ' - include: "*.jpg"\n' + ' - exclude: "*.tmp"\n' + ' - "H .git"\n' + ) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.filters == ["+ *.jpg", "- *.tmp", "H .git"] + + def test_rsync_options_override(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint(volume="v"), + destination=DestinationSyncEndpoint(volume="v"), + rsync_options=RsyncOptions( + default_options_override=["-a", "--delete"], + ), + ), + }, + ) + p = tmp_path / "opts.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.rsync_options.default_options_override == [ + "-a", + 
"--delete", + ] + assert sync.rsync_options.extra_options == [] + + def test_rsync_extra_options(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint(volume="v"), + destination=DestinationSyncEndpoint(volume="v"), + rsync_options=RsyncOptions( + extra_options=[ + "--bwlimit=1000", + "--progress", + ], + ), + ), + }, + ) + p = tmp_path / "extra.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.rsync_options.default_options_override is None + assert sync.rsync_options.extra_options == [ + "--bwlimit=1000", + "--progress", + ] + + def test_connection_options(self, tmp_path: Path) -> None: + config = Config( + ssh_endpoints={ + "slow": SshEndpoint( + slug="slow", + host="slow.example.com", + connection_options=SshConnectionOptions( + connect_timeout=30, + strict_host_key_checking=False, + known_hosts_file="/dev/null", + ), + ), + }, + ) + p = tmp_path / "ssh_opts.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + opts = cfg.ssh_endpoints["slow"].connection_options + assert opts.connect_timeout == 30 + assert opts.strict_host_key_checking is False + assert opts.known_hosts_file == "/dev/null" + + def test_connection_options_server_alive_interval( + self, tmp_path: Path + ) -> None: + config = Config( + ssh_endpoints={ + "keepalive": SshEndpoint( + slug="keepalive", + host="host.example.com", + connection_options=SshConnectionOptions( + server_alive_interval=60, + ), + ), + }, + ) + p = tmp_path / "keepalive.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + opts = cfg.ssh_endpoints["keepalive"].connection_options + assert opts.server_alive_interval == 60 + + def test_connection_options_channel_timeout(self, tmp_path: Path) -> None: + config = Config( + ssh_endpoints={ + "ch-timeout": SshEndpoint( + slug="ch-timeout", + host="host.example.com", 
+ connection_options=SshConnectionOptions( + channel_timeout=30.0, + ), + ), + }, + ) + p = tmp_path / "ch_timeout.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + opts = cfg.ssh_endpoints["ch-timeout"].connection_options + assert opts.channel_timeout == 30.0 + + def test_connection_options_disabled_algorithms( + self, tmp_path: Path + ) -> None: + config = Config( + ssh_endpoints={ + "restricted": SshEndpoint( + slug="restricted", + host="host.example.com", + connection_options=SshConnectionOptions( + disabled_algorithms={ + "ciphers": ["aes128-cbc"], + }, + ), + ), + }, + ) + p = tmp_path / "disabled_algs.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + opts = cfg.ssh_endpoints["restricted"].connection_options + assert opts.disabled_algorithms == { + "ciphers": ["aes128-cbc"], + } + + def test_proxy_jump_valid(self, tmp_path: Path) -> None: + config = Config( + ssh_endpoints={ + "bastion": SshEndpoint( + slug="bastion", + host="bastion.example.com", + ), + "target": SshEndpoint( + slug="target", + host="target.internal", + proxy_jump="bastion", + ), + }, + ) + p = tmp_path / "proxy.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + assert cfg.ssh_endpoints["target"].proxy_jump == "bastion" + chain = cfg.resolve_proxy_chain(cfg.ssh_endpoints["target"]) + assert len(chain) == 1 + assert chain[0].host == "bastion.example.com" + + def test_proxy_jump_unknown_server(self, tmp_path: Path) -> None: + p = tmp_path / "bad_proxy.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "target": { + "host": "target.internal", + "proxy-jump": "nonexistent", + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "unknown proxy-jump server" in str(cause) + + def test_proxy_jump_circular(self, tmp_path: Path) -> None: + p = tmp_path / "circular.yaml" + p.write_text( + yaml.safe_dump( + { 
+ "ssh-endpoints": { + "a": { + "host": "a.example.com", + "proxy-jump": "b", + }, + "b": { + "host": "b.example.com", + "proxy-jump": "a", + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "Circular proxy-jump chain" in str(cause) + + def test_proxy_jumps_valid(self, tmp_path: Path) -> None: + config = Config( + ssh_endpoints={ + "bastion1": SshEndpoint( + slug="bastion1", + host="bastion1.example.com", + ), + "bastion2": SshEndpoint( + slug="bastion2", + host="bastion2.example.com", + ), + "target": SshEndpoint( + slug="target", + host="target.internal", + proxy_jumps=["bastion1", "bastion2"], + ), + }, + ) + p = tmp_path / "proxy_jumps.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + chain = cfg.resolve_proxy_chain(cfg.ssh_endpoints["target"]) + assert len(chain) == 2 + assert chain[0].host == "bastion1.example.com" + assert chain[1].host == "bastion2.example.com" + + def test_proxy_jumps_single_element(self, tmp_path: Path) -> None: + config = Config( + ssh_endpoints={ + "bastion": SshEndpoint( + slug="bastion", + host="bastion.example.com", + ), + "target": SshEndpoint( + slug="target", + host="target.internal", + proxy_jumps=["bastion"], + ), + }, + ) + p = tmp_path / "proxy_jumps_single.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + chain = cfg.resolve_proxy_chain(cfg.ssh_endpoints["target"]) + assert len(chain) == 1 + assert chain[0].host == "bastion.example.com" + + def test_proxy_jump_and_proxy_jumps_mutual_exclusivity( + self, tmp_path: Path + ) -> None: + p = tmp_path / "exclusive.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "bastion": { + "host": "bastion.example.com", + }, + "target": { + "host": "target.internal", + "proxy-jump": "bastion", + "proxy-jumps": ["bastion"], + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause 
= excinfo.value.__cause__ + assert cause is not None + assert "mutually exclusive" in str(cause) + + def test_proxy_jumps_unknown_server(self, tmp_path: Path) -> None: + p = tmp_path / "bad_proxy_jumps.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "target": { + "host": "target.internal", + "proxy-jumps": ["nonexistent"], + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "unknown proxy-jump server" in str(cause) + + def test_proxy_jumps_circular(self, tmp_path: Path) -> None: + p = tmp_path / "circular_jumps.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "a": { + "host": "a.example.com", + "proxy-jumps": ["b"], + }, + "b": { + "host": "b.example.com", + "proxy-jumps": ["a"], + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "Circular proxy-jump chain" in str(cause) + + def test_extends_proxy_jumps_overrides_parent_proxy_jump( + self, tmp_path: Path + ) -> None: + p = tmp_path / "extends_jumps.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "bastion1": { + "host": "bastion1.example.com", + }, + "bastion2": { + "host": "bastion2.example.com", + }, + "bastion3": { + "host": "bastion3.example.com", + }, + "parent": { + "host": "parent.internal", + "proxy-jump": "bastion1", + }, + "child": { + "host": "child.internal", + "extends": "parent", + "proxy-jumps": [ + "bastion2", + "bastion3", + ], + }, + }, + } + ) + ) + cfg = load_config(str(p)) + chain = cfg.resolve_proxy_chain(cfg.ssh_endpoints["child"]) + assert len(chain) == 2 + assert chain[0].host == "bastion2.example.com" + assert chain[1].host == "bastion3.example.com" + + def test_extends_proxy_jump_overrides_parent_proxy_jumps( + self, tmp_path: Path + ) -> None: + p = tmp_path / "extends_jump.yaml" + p.write_text( + yaml.safe_dump( + { + 
"ssh-endpoints": { + "bastion1": { + "host": "bastion1.example.com", + }, + "bastion2": { + "host": "bastion2.example.com", + }, + "bastion3": { + "host": "bastion3.example.com", + }, + "parent": { + "host": "parent.internal", + "proxy-jumps": [ + "bastion1", + "bastion2", + ], + }, + "child": { + "host": "child.internal", + "extends": "parent", + "proxy-jump": "bastion3", + }, + }, + } + ) + ) + cfg = load_config(str(p)) + chain = cfg.resolve_proxy_chain(cfg.ssh_endpoints["child"]) + assert len(chain) == 1 + assert chain[0].host == "bastion3.example.com" + + def test_proxy_jump_chain_property(self) -> None: + # Single proxy-jump + ep_single = SshEndpoint( + slug="single", + host="host.example.com", + proxy_jump="bastion", + ) + assert ep_single.proxy_jump_chain == ["bastion"] + + # List proxy-jumps + ep_list = SshEndpoint( + slug="multi", + host="host.example.com", + proxy_jumps=["bastion1", "bastion2"], + ) + assert ep_list.proxy_jump_chain == [ + "bastion1", + "bastion2", + ] + + # No proxy + ep_none = SshEndpoint( + slug="no-proxy", + host="host.example.com", + ) + assert ep_none.proxy_jump_chain == [] + + def test_invalid_filter_entry(self, tmp_path: Path) -> None: + p = tmp_path / "bad_filter.yaml" + p.write_text( + "volumes:\n" + " v:\n type: local\n path: /x\n" + "syncs:\n" + " s:\n" + " source:\n volume: v\n" + " destination:\n volume: v\n" + " filters:\n" + " - badkey: value\n" + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "include" in str(cause) or "exclude" in str(cause) + + def test_hard_link_snapshots(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint(volume="v"), + destination=DestinationSyncEndpoint( + volume="v", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=10 + ), + ), + ), + }, + ) + p = tmp_path / 
"hl.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.destination.hard_link_snapshots.enabled is True + assert sync.destination.hard_link_snapshots.max_snapshots == 10 + assert sync.destination.btrfs_snapshots.enabled is False + assert sync.destination.snapshot_mode == "hard-link" + + def test_hard_link_snapshots_no_max(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint(volume="v"), + destination=DestinationSyncEndpoint( + volume="v", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True + ), + ), + ), + }, + ) + p = tmp_path / "hl_no_max.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.destination.hard_link_snapshots.enabled is True + assert sync.destination.hard_link_snapshots.max_snapshots is None + + def test_mutual_exclusivity_btrfs_and_hardlink( + self, tmp_path: Path + ) -> None: + with pytest.raises(Exception, match="mutually exclusive"): + DestinationSyncEndpoint( + volume="v", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ) + + def test_snapshot_mode_none(self) -> None: + ep = DestinationSyncEndpoint(volume="v") + assert ep.snapshot_mode == "none" + + def test_snapshot_mode_btrfs(self) -> None: + ep = DestinationSyncEndpoint( + volume="v", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ) + assert ep.snapshot_mode == "btrfs" + + def test_snapshot_mode_hard_link(self) -> None: + ep = DestinationSyncEndpoint( + volume="v", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ) + assert ep.snapshot_mode == "hard-link" + + def test_location_and_locations_mutual_exclusivity( + self, tmp_path: Path + ) -> None: + p = tmp_path / "exclusive_loc.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "server": { + "host": 
"server.example.com", + "location": "home", + "locations": ["home", "travel"], + }, + }, + } + ) + ) + with pytest.raises(ConfigError) as excinfo: + load_config(str(p)) + cause = excinfo.value.__cause__ + assert cause is not None + assert "mutually exclusive" in str(cause) + + def test_location_list_property(self) -> None: + # Single location + ep_single = SshEndpoint( + slug="single", + host="host.example.com", + location="home", + ) + assert ep_single.location_list == ["home"] + + # List locations + ep_list = SshEndpoint( + slug="multi", + host="host.example.com", + locations=["home", "travel"], + ) + assert ep_list.location_list == ["home", "travel"] + + # No location + ep_none = SshEndpoint( + slug="no-loc", + host="host.example.com", + ) + assert ep_none.location_list == [] + + def test_extends_locations_overrides_parent_location( + self, tmp_path: Path + ) -> None: + p = tmp_path / "extends_locs.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "parent": { + "host": "parent.internal", + "location": "home", + }, + "child": { + "host": "child.internal", + "extends": "parent", + "locations": ["home", "travel"], + }, + }, + } + ) + ) + cfg = load_config(str(p)) + assert cfg.ssh_endpoints["child"].location_list == [ + "home", + "travel", + ] + + def test_extends_location_overrides_parent_locations( + self, tmp_path: Path + ) -> None: + p = tmp_path / "extends_loc.yaml" + p.write_text( + yaml.safe_dump( + { + "ssh-endpoints": { + "parent": { + "host": "parent.internal", + "locations": [ + "home", + "travel", + ], + }, + "child": { + "host": "child.internal", + "extends": "parent", + "location": "office", + }, + }, + } + ) + ) + cfg = load_config(str(p)) + assert cfg.ssh_endpoints["child"].location_list == [ + "office", + ] + + def test_resolve_endpoint_location_filter_with_locations( + self, + ) -> None: + config = Config( + ssh_endpoints={ + "home-server": SshEndpoint( + slug="home-server", + host="192.168.1.10", + location="home", + ), + 
"multi-server": SshEndpoint( + slug="multi-server", + host="10.0.0.5", + locations=["home", "travel"], + ), + "office-server": SshEndpoint( + slug="office-server", + host="10.1.0.5", + location="office", + ), + }, + volumes={ + "remote": RemoteVolume( + slug="remote", + ssh_endpoint="home-server", + ssh_endpoints=[ + "home-server", + "multi-server", + "office-server", + ], + path="/data", + ), + }, + syncs={}, + ) + # Filter for "travel" should match multi-server + ef = EndpointFilter(locations=["travel"]) + result = config.resolve_endpoint_for_volume( + config.volumes["remote"], ef # type: ignore[arg-type] + ) + assert result.slug == "multi-server" + + def test_source_btrfs_snapshots(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint( + volume="v", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + destination=DestinationSyncEndpoint(volume="v"), + ), + }, + ) + p = tmp_path / "src_btrfs.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.source.btrfs_snapshots.enabled is True + assert sync.source.snapshot_mode == "btrfs" + assert sync.destination.btrfs_snapshots.enabled is False + + def test_source_hard_link_snapshots(self, tmp_path: Path) -> None: + config = Config( + volumes={ + "v": LocalVolume(slug="v", path="/x"), + }, + syncs={ + "s": SyncConfig( + slug="s", + source=SyncEndpoint( + volume="v", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True + ), + ), + destination=DestinationSyncEndpoint(volume="v"), + ), + }, + ) + p = tmp_path / "src_hl.yaml" + p.write_text(_config_to_yaml(config)) + cfg = load_config(str(p)) + sync = cfg.syncs["s"] + assert sync.source.hard_link_snapshots.enabled is True + assert sync.source.snapshot_mode == "hard-link" + assert sync.source.btrfs_snapshots.enabled is False diff --git a/tests/test_encryption.py b/tests/test_encryption.py 
deleted file mode 100644 index 9471d8d..0000000 --- a/tests/test_encryption.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Tests for encryption functionality. -""" - -import pytest -import tempfile -from pathlib import Path -from ssb.encryption import EncryptionManager - - -class TestEncryptionManager: - """Test cases for EncryptionManager.""" - - def test_init(self): - """Test EncryptionManager initialization.""" - encryption_manager = EncryptionManager() - assert encryption_manager.key is not None - assert len(encryption_manager.key) > 0 - - def test_init_with_key(self): - """Test EncryptionManager initialization with provided key.""" - key = EncryptionManager().key - encryption_manager = EncryptionManager(key) - assert encryption_manager.key == key - - def test_from_password(self): - """Test creating EncryptionManager from password.""" - password = "test_password" - encryption_manager = EncryptionManager.from_password(password) - assert encryption_manager.key is not None - - def test_encrypt_decrypt_file(self): - """Test encrypting and decrypting a file.""" - with tempfile.TemporaryDirectory() as temp_dir: - # Create a test file - original_file = Path(temp_dir) / "original.txt" - original_content = "Hello, World! This is a test file." 
- original_file.write_text(original_content) - - encrypted_file = Path(temp_dir) / "encrypted.bin" - decrypted_file = Path(temp_dir) / "decrypted.txt" - - encryption_manager = EncryptionManager() - - # Encrypt the file - encryption_manager.encrypt_file( - str(original_file), str(encrypted_file) - ) - assert encrypted_file.exists() - - # Decrypt the file - encryption_manager.decrypt_file( - str(encrypted_file), str(decrypted_file) - ) - assert decrypted_file.exists() - - # Check content - assert decrypted_file.read_text() == original_content - - def test_encrypt_nonexistent_file(self): - """Test encrypting a non-existent file.""" - with tempfile.TemporaryDirectory() as temp_dir: - encryption_manager = EncryptionManager() - output_file = Path(temp_dir) / "output.bin" - - with pytest.raises(FileNotFoundError): - encryption_manager.encrypt_file( - "/nonexistent/file.txt", str(output_file) - ) - - def test_save_load_key(self): - """Test saving and loading encryption keys.""" - with tempfile.TemporaryDirectory() as temp_dir: - encryption_manager = EncryptionManager() - key_file = Path(temp_dir) / "key.bin" - - # Save the key - encryption_manager.save_key(str(key_file)) - assert key_file.exists() - - # Load the key - loaded_manager = EncryptionManager.load_key(str(key_file)) - assert loaded_manager.key == encryption_manager.key - - def test_decrypt_with_wrong_key(self): - """Test decrypting with wrong key.""" - with tempfile.TemporaryDirectory() as temp_dir: - # Create and encrypt a file - original_file = Path(temp_dir) / "original.txt" - original_file.write_text("Test content") - - encrypted_file = Path(temp_dir) / "encrypted.bin" - decrypted_file = Path(temp_dir) / "decrypted.txt" - - encryption_manager1 = EncryptionManager() - encryption_manager1.encrypt_file( - str(original_file), str(encrypted_file) - ) - - # Try to decrypt with different key - encryption_manager2 = EncryptionManager() - - with pytest.raises(ValueError): - encryption_manager2.decrypt_file( - 
str(encrypted_file), str(decrypted_file) - ) diff --git a/tests/test_scriptgen.py b/tests/test_scriptgen.py new file mode 100644 index 0000000..00bbeb8 --- /dev/null +++ b/tests/test_scriptgen.py @@ -0,0 +1,1159 @@ +"""Tests for nbkp.scriptgen.""" + +from __future__ import annotations + +import subprocess +from datetime import datetime, timezone + +from nbkp.config import ( + BtrfsSnapshotConfig, + Config, + DestinationSyncEndpoint, + HardLinkSnapshotConfig, + LocalVolume, + RemoteVolume, + RsyncOptions, + SshEndpoint, + SshConnectionOptions, + SyncConfig, + SyncEndpoint, + resolve_all_endpoints, +) +from nbkp.scriptgen import ScriptOptions, generate_script + +_NOW = datetime(2026, 2, 21, 12, 0, 0, tzinfo=timezone.utc) +_OPTIONS = ScriptOptions(config_path="/etc/nbkp/config.yaml") + + +def _local_to_local_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="my-sync", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint(volume="dst", subdir="backup"), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"my-sync": sync}, + ) + + +def _local_to_remote_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/data") + server = SshEndpoint( + slug="nas", + host="nas.example.com", + port=5022, + user="backup", + key="~/.ssh/nas_ed25519", + ) + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/volume1/backups", + ) + sync = SyncConfig( + slug="photos-to-nas", + source=SyncEndpoint(volume="src", subdir="photos"), + destination=DestinationSyncEndpoint( + volume="nas-vol", subdir="photos-backup" + ), + ) + return Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "nas-vol": dst}, + syncs={"photos-to-nas": sync}, + ) + + +def _remote_to_local_config() -> Config: + server = SshEndpoint( + slug="remote", + host="remote.example.com", + user="admin", + ) + src = RemoteVolume( + slug="remote-vol", + 
ssh_endpoint="remote", + path="/data", + ) + dst = LocalVolume(slug="local", path="/mnt/backup") + sync = SyncConfig( + slug="pull-data", + source=SyncEndpoint(volume="remote-vol"), + destination=DestinationSyncEndpoint(volume="local"), + ) + return Config( + ssh_endpoints={"remote": server}, + volumes={"remote-vol": src, "local": dst}, + syncs={"pull-data": sync}, + ) + + +def _btrfs_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="btrfs-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True, max_snapshots=5), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"btrfs-sync": sync}, + ) + + +def _btrfs_no_prune_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="btrfs-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + btrfs_snapshots=BtrfsSnapshotConfig(enabled=True), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"btrfs-sync": sync}, + ) + + +def _disabled_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="disabled-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + enabled=False, + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"disabled-sync": sync}, + ) + + +def _filters_config() -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="filtered-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + filters=["+ *.jpg", "- *.tmp", "H .git"], + filter_file="~/.config/nbkp/filters.rules", + ) + return Config( + 
volumes={"src": src, "dst": dst}, + syncs={"filtered-sync": sync}, + ) + + +def _proxy_jump_config() -> Config: + bastion = SshEndpoint( + slug="bastion", + host="bastion.example.com", + user="admin", + ) + nas = SshEndpoint( + slug="nas", + host="nas.internal", + port=5022, + user="backup", + proxy_jump="bastion", + ) + src = LocalVolume(slug="src", path="/mnt/data") + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/volume1", + ) + sync = SyncConfig( + slug="proxy-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="nas-vol"), + ) + return Config( + ssh_endpoints={"bastion": bastion, "nas": nas}, + volumes={"src": src, "nas-vol": dst}, + syncs={"proxy-sync": sync}, + ) + + +def _proxy_jumps_config() -> Config: + bastion1 = SshEndpoint( + slug="bastion1", + host="bastion1.example.com", + user="admin", + ) + bastion2 = SshEndpoint( + slug="bastion2", + host="bastion2.example.com", + port=2222, + ) + nas = SshEndpoint( + slug="nas", + host="nas.internal", + port=5022, + user="backup", + proxy_jumps=["bastion1", "bastion2"], + ) + src = LocalVolume(slug="src", path="/mnt/data") + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/volume1", + ) + sync = SyncConfig( + slug="proxy-jumps-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="nas-vol"), + ) + return Config( + ssh_endpoints={ + "bastion1": bastion1, + "bastion2": bastion2, + "nas": nas, + }, + volumes={"src": src, "nas-vol": dst}, + syncs={"proxy-jumps-sync": sync}, + ) + + +class TestHeader: + def test_shebang(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert script.startswith("#!/bin/bash\n") + + def test_strict_mode(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "set -euo pipefail" in script + assert "IFS=$'\\n\\t'" in script + + def 
test_config_path_in_header(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "# Config: /etc/nbkp/config.yaml" in script + + def test_timestamp_in_header(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "2026-02-21T12:00:00Z" in script + + def test_preserved_dropped_comments(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "# Preserved from nbkp run:" in script + assert "# Dropped from nbkp run:" in script + assert "rsync command variants" in script + assert "Paramiko-only SSH options" in script + + def test_arg_parsing(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "NBKP_DRY_RUN=false" in script + assert 'NBKP_PROGRESS=""' in script + assert "-n|--dry-run" in script + assert "NBKP_FAILURES=0" in script + + def test_nbkp_log_helper(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "nbkp_log()" in script + + def test_nbkp_log_dry_run_tag(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "[nbkp] [dry-run]" in script + assert '"[nbkp] $*"' in script + + +class TestVolumeChecks: + def test_local_volume_check(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "test -f /mnt/src/.nbkp-vol" in script + assert "test -f /mnt/dst/.nbkp-vol" in script + + def test_remote_volume_check(self) -> None: + config = _local_to_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + # Remote volume check uses ssh + assert "ssh" in script + assert "/volume1/backups/.nbkp-vol" in script + + +class TestLocalToLocal: + def 
test_rsync_command(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "rsync" in script + assert "/mnt/src/photos/" in script + assert "/mnt/dst/backup/" in script + + def test_function_name(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "sync_my_sync()" in script + + def test_sync_invocation(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert ( + "sync_my_sync" " || NBKP_FAILURES=$((NBKP_FAILURES + 1))" + ) in script + + +class TestLocalToRemote: + def test_rsync_with_ssh(self) -> None: + config = _local_to_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + assert "rsync" in script + assert "/mnt/data/photos/" in script + assert "backup@nas.example.com:/volume1/backups" in script + assert "-e" in script + assert "-p" in script + assert "5022" in script + + +class TestRemoteToLocal: + def test_rsync_with_remote_source(self) -> None: + config = _remote_to_local_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + assert "admin@remote.example.com:/data/" in script + assert "/mnt/backup/" in script + + +class TestDisabledSync: + def test_commented_out(self) -> None: + config = _disabled_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "# : disabled" in script + assert "# sync_disabled_sync()" in script + + def test_disabled_invocation_commented(self) -> None: + config = _disabled_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "# sync_disabled_sync || NBKP_FAILURES" in script + + +class TestBtrfs: + def test_no_link_dest(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert 
"RSYNC_LINK_DEST" not in script + + def test_snapshot_creation(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "btrfs" in script + assert "subvolume" in script + assert "snapshot" in script + assert "/mnt/dst/latest" in script + assert "NBKP_TS=$(date -u" in script + + def test_snapshot_guarded_by_dry_run(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert 'if [ "$NBKP_DRY_RUN" = false ]' in script + + def test_prune_with_max_snapshots(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "Prune old snapshots" in script + assert "max: 5" in script + assert "NBKP_EXCESS" in script + assert "btrfs property set" in script + assert "btrfs subvolume delete" in script + + def test_no_prune_without_max(self) -> None: + config = _btrfs_no_prune_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "Prune old snapshots" not in script + + def test_latest_and_snapshots_dir_checks(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "latest/ directory not found" in script + assert "snapshots/ directory not found" in script + + +class TestPreflightChecks: + def test_source_sentinel(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert ".nbkp-src" in script + assert "source sentinel" in script + + def test_destination_sentinel(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert ".nbkp-dst" in script + assert "destination sentinel" in script + + def test_rsync_availability(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "command -v rsync" in script + assert "rsync not found" in script + + def test_btrfs_availability(self) -> None: + config = _btrfs_config() + 
script = generate_script(config, _OPTIONS, now=_NOW) + assert "command -v btrfs" in script + assert "btrfs not found" in script + + def test_remote_preflight_uses_ssh(self) -> None: + config = _local_to_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + # Remote destination checks should use ssh + assert "ssh" in script + assert ".nbkp-dst" in script + + +class TestDryRunAndProgress: + def test_dry_run_flag_injected(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "RSYNC_DRY_RUN_FLAG" in script + assert '${RSYNC_DRY_RUN_FLAG:+"$RSYNC_DRY_RUN_FLAG"}' in script + + def test_progress_flags_injected(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "RSYNC_PROGRESS_FLAGS" in script + assert "$RSYNC_PROGRESS_FLAGS" in script + + def test_progress_modes(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "overall" in script + assert "per-file" in script + assert "--info=progress2" in script + assert "--progress" in script + + +class TestFilters: + def test_filter_args_present(self) -> None: + config = _filters_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "--filter=+ *.jpg" in script + assert "--filter=- *.tmp" in script + assert "--filter=H .git" in script + + def test_filter_file_present(self) -> None: + config = _filters_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "--filter=merge ~/.config/nbkp/filters.rules" in script + + +class TestProxyJump: + def test_proxy_jump_in_ssh(self) -> None: + config = _proxy_jump_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + assert "ProxyCommand=" in script + assert "admin@bastion.example.com" in 
script + + def test_proxy_jumps_in_ssh(self) -> None: + config = _proxy_jumps_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, + _OPTIONS, + now=_NOW, + resolved_endpoints=resolved, + ) + assert "ProxyCommand=" in script + assert "admin@bastion1.example.com" in script + assert "bastion2.example.com" in script + + def test_proxy_jumps_valid_syntax(self) -> None: + config = _proxy_jumps_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, + _OPTIONS, + now=_NOW, + resolved_endpoints=resolved, + ) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + +class TestSshConnectionOptions: + def test_connection_options_in_script(self) -> None: + server = SshEndpoint( + slug="nas", + host="nas.example.com", + user="backup", + connection_options=SshConnectionOptions( + compress=True, + server_alive_interval=60, + strict_host_key_checking=False, + forward_agent=True, + ), + ) + src = LocalVolume(slug="src", path="/mnt/data") + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/backup", + ) + sync = SyncConfig( + slug="ssh-opts-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="nas-vol"), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "nas-vol": dst}, + syncs={"ssh-opts-sync": sync}, + ) + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + assert "Compression=yes" in script + assert "ServerAliveInterval=60" in script + assert "StrictHostKeyChecking=no" in script + assert "ForwardAgent=yes" in script + + +class TestShellValidity: + def test_local_to_local_valid_syntax(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + 
input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_local_to_remote_valid_syntax(self) -> None: + config = _local_to_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_remote_to_local_valid_syntax(self) -> None: + config = _remote_to_local_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_btrfs_valid_syntax(self) -> None: + config = _btrfs_config() + script = generate_script(config, _OPTIONS, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_disabled_valid_syntax(self) -> None: + config = _disabled_config() + script = generate_script(config, _OPTIONS, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_filters_valid_syntax(self) -> None: + config = _filters_config() + script = generate_script(config, _OPTIONS, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_proxy_jump_valid_syntax(self) -> None: + config = _proxy_jump_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, 
now=_NOW, resolved_endpoints=resolved + ) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + +class TestEdgeCases: + def test_empty_config(self) -> None: + config = Config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "#!/bin/bash" in script + assert "All syncs completed successfully" in script + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0 + + def test_all_disabled(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + s1 = SyncConfig( + slug="s-one", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + enabled=False, + ) + s2 = SyncConfig( + slug="s-two", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + enabled=False, + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"s-one": s1, "s-two": s2}, + ) + script = generate_script(config, _OPTIONS, now=_NOW) + assert "# : disabled" in script + assert "# sync_s_one" in script + assert "# sync_s_two" in script + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0 + + def test_paths_with_spaces(self) -> None: + src = LocalVolume(slug="src", path="/mnt/my data") + dst = LocalVolume(slug="dst", path="/mnt/my backup") + sync = SyncConfig( + slug="space-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"space-sync": sync}, + ) + script = generate_script(config, _OPTIONS, now=_NOW) + # Paths with spaces should be properly quoted + assert "'/mnt/my data/'" in script + assert "'/mnt/my backup/'" in script + result = subprocess.run( + ["bash", "-n"], + 
input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0 + + def test_no_config_path(self) -> None: + config = _local_to_local_config() + options = ScriptOptions(config_path=None) + script = generate_script(config, options, now=_NOW) + assert "# Config: " in script + + def test_custom_rsync_options(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="custom-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + rsync_options=RsyncOptions( + default_options_override=["-a", "--delete"], + extra_options=["--bwlimit=1000", "--progress"], + ), + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={"custom-sync": sync}, + ) + script = generate_script(config, _OPTIONS, now=_NOW) + assert "--bwlimit=1000" in script + assert "--progress" in script + + def test_mixed_enabled_disabled(self) -> None: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + enabled = SyncConfig( + slug="active-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + ) + disabled = SyncConfig( + slug="off-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint(volume="dst"), + enabled=False, + ) + config = Config( + volumes={"src": src, "dst": dst}, + syncs={ + "active-sync": enabled, + "off-sync": disabled, + }, + ) + script = generate_script(config, _OPTIONS, now=_NOW) + assert "sync_active_sync()" in script + assert "# : disabled — off-sync" in script + assert ( + "sync_active_sync" " || NBKP_FAILURES=$((NBKP_FAILURES + 1))" + ) in script + assert "# sync_off_sync || NBKP_FAILURES" in script + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0 + + +class TestSummary: + def test_failure_count_and_exit(self) -> None: + config = 
_local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert 'nbkp_log "$NBKP_FAILURES sync(s) failed"' in script + assert "exit 1" in script + assert "All syncs completed successfully" in script + + +class TestRemoteBtrfs: + def test_remote_btrfs_snapshot(self) -> None: + server = SshEndpoint( + slug="nas", + host="nas.example.com", + user="backup", + ) + src = LocalVolume(slug="src", path="/mnt/data") + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/volume1", + ) + sync = SyncConfig( + slug="remote-btrfs", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="nas-vol", + btrfs_snapshots=BtrfsSnapshotConfig( + enabled=True, max_snapshots=3 + ), + ), + ) + config = Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "nas-vol": dst}, + syncs={"remote-btrfs": sync}, + ) + resolved = resolve_all_endpoints(config) + script = generate_script( + config, _OPTIONS, now=_NOW, resolved_endpoints=resolved + ) + # Btrfs commands should be wrapped in SSH + assert "btrfs subvolume snapshot" in script + assert "btrfs property set" in script + assert "btrfs subvolume delete" in script + assert "nas.example.com" in script + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + +class TestHardLink: + def _hl_config(self) -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + sync = SyncConfig( + slug="hl-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=5 + ), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"hl-sync": sync}, + ) + + def _hl_no_prune_config(self) -> Config: + src = LocalVolume(slug="src", path="/mnt/src") + dst = LocalVolume(slug="dst", path="/mnt/dst") + 
sync = SyncConfig( + slug="hl-sync", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="dst", + hard_link_snapshots=HardLinkSnapshotConfig(enabled=True), + ), + ) + return Config( + volumes={"src": src, "dst": dst}, + syncs={"hl-sync": sync}, + ) + + def _hl_remote_config(self) -> Config: + server = SshEndpoint( + slug="nas", + host="nas.example.com", + user="backup", + ) + src = LocalVolume(slug="src", path="/mnt/data") + dst = RemoteVolume( + slug="nas-vol", + ssh_endpoint="nas", + path="/volume1", + ) + sync = SyncConfig( + slug="hl-remote", + source=SyncEndpoint(volume="src"), + destination=DestinationSyncEndpoint( + volume="nas-vol", + hard_link_snapshots=HardLinkSnapshotConfig( + enabled=True, max_snapshots=3 + ), + ), + ) + return Config( + ssh_endpoints={"nas": server}, + volumes={"src": src, "nas-vol": dst}, + syncs={"hl-remote": sync}, + ) + + def test_orphan_cleanup(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "readlink" in script + assert "latest" in script + + def test_link_dest_resolution(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "RSYNC_LINK_DEST" in script + assert "--link-dest" in script + + def test_mkdir_snapshot_dir(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "mkdir -p" in script + assert "NBKP_TS" in script + assert "/mnt/dst/snapshots/" in script + + def test_rsync_to_snapshot(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "/mnt/dst/snapshots/$NBKP_TS/" in script + + def test_symlink_update(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "ln -sfn" in script + assert "snapshots/$NBKP_TS" in script + + def test_symlink_guarded_by_dry_run(self) -> None: + config = self._hl_config() + script = 
generate_script(config, _OPTIONS, now=_NOW) + assert 'if [ "$NBKP_DRY_RUN" = false ]' in script + + def test_prune_with_max_snapshots(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "Prune old snapshots" in script + assert "max: 5" in script + assert "rm -rf" in script + + def test_no_prune_without_max(self) -> None: + config = self._hl_no_prune_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "Prune old snapshots" not in script + + def test_no_btrfs_commands(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "btrfs subvolume" not in script + assert "btrfs property" not in script + + def test_snapshots_dir_preflight_check(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "snapshots/ directory not found" in script + + def test_valid_syntax(self) -> None: + config = self._hl_config() + script = generate_script(config, _OPTIONS, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_remote_valid_syntax(self) -> None: + config = self._hl_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, + _OPTIONS, + now=_NOW, + resolved_endpoints=resolved, + ) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_remote_orphan_cleanup_uses_ssh(self) -> None: + config = self._hl_remote_config() + resolved = resolve_all_endpoints(config) + script = generate_script( + config, + _OPTIONS, + now=_NOW, + resolved_endpoints=resolved, + ) + assert "nas.example.com" in script + assert "readlink" in script + + +class TestRelativePaths: + """Tests for --relative-src / --relative-dst path 
handling.""" + + def test_relative_dst_local_to_local(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + # Source stays absolute + assert "/mnt/src/photos/" in script + # Dest uses NBKP_SCRIPT_DIR-relative path + assert "${NBKP_SCRIPT_DIR}" in script + assert "/mnt/dst/backup/" not in script + + def test_relative_src_local_to_local(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/src/backup.sh", + relative_src=True, + ) + script = generate_script(config, options, now=_NOW) + # Source uses NBKP_SCRIPT_DIR-relative path + assert "${NBKP_SCRIPT_DIR}" in script + # Dest stays absolute + assert "/mnt/dst/backup/" in script + + def test_relative_both(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/backup.sh", + relative_src=True, + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + assert "${NBKP_SCRIPT_DIR}" in script + # Both absolute paths replaced + assert "/mnt/src/photos/" not in script + assert "/mnt/dst/backup/" not in script + + def test_relative_src_local_to_remote(self) -> None: + config = _local_to_remote_config() + resolved = resolve_all_endpoints(config) + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/data/backup.sh", + relative_src=True, + ) + script = generate_script( + config, options, now=_NOW, resolved_endpoints=resolved + ) + # Local source relativized + assert "${NBKP_SCRIPT_DIR}" in script + # Remote dest stays absolute + assert "nas.example.com" in script + + def test_relative_dst_remote_to_local(self) -> None: + config = _remote_to_local_config() + resolved = resolve_all_endpoints(config) + options = ScriptOptions( + 
config_path="/etc/nbkp/config.yaml", + output_file="/mnt/backup/backup.sh", + relative_dst=True, + ) + script = generate_script( + config, options, now=_NOW, resolved_endpoints=resolved + ) + # Local dest relativized + assert "${NBKP_SCRIPT_DIR}" in script + # Remote source stays absolute + assert "admin@remote.example.com" in script + + def test_script_dir_in_header(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + assert "NBKP_SCRIPT_DIR=" in script + assert "BASH_SOURCE[0]" in script + + def test_no_script_dir_when_absolute(self) -> None: + config = _local_to_local_config() + script = generate_script(config, _OPTIONS, now=_NOW) + assert "NBKP_SCRIPT_DIR" not in script + + def test_relative_shell_valid(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_relative_both_shell_valid(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/backup.sh", + relative_src=True, + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_relative_btrfs(self) -> None: + config = _btrfs_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, 
now=_NOW) + assert "${NBKP_SCRIPT_DIR}" in script + # Btrfs snapshot uses relative dest paths + assert "btrfs" in script + assert "subvolume" in script + assert "snapshot" in script + assert "${NBKP_SCRIPT_DIR}" in script + result = subprocess.run( + ["bash", "-n"], + input=script, + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"bash -n failed:\n{result.stderr}" + + def test_relative_volume_checks(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + # Dst volume check should use relative path + assert "${NBKP_SCRIPT_DIR}" in script + # Src volume check stays absolute + assert "test -f /mnt/src/.nbkp-vol" in script + + def test_relative_preflight_checks(self) -> None: + config = _local_to_local_config() + options = ScriptOptions( + config_path="/etc/nbkp/config.yaml", + output_file="/mnt/dst/backup.sh", + relative_dst=True, + ) + script = generate_script(config, options, now=_NOW) + # Source preflight stays absolute + assert "/mnt/src/photos/.nbkp-src" in script + # Dest preflight uses relative + assert ".nbkp-dst" in script diff --git a/tests/test_sync_ordering.py b/tests/test_sync_ordering.py new file mode 100644 index 0000000..0a48eac --- /dev/null +++ b/tests/test_sync_ordering.py @@ -0,0 +1,110 @@ +"""Tests for nbkp.sync.ordering.""" + +from __future__ import annotations + +import pytest + +from nbkp.config import ( + ConfigError, + DestinationSyncEndpoint, + SyncConfig, + SyncEndpoint, +) +from nbkp.sync.ordering import endpoint_key, sort_syncs + + +def _sync( + slug: str, + src_vol: str, + dst_vol: str, + src_sub: str | None = None, + dst_sub: str | None = None, +) -> SyncConfig: + return SyncConfig( + slug=slug, + source=SyncEndpoint(volume=src_vol, subdir=src_sub), + destination=DestinationSyncEndpoint(volume=dst_vol, subdir=dst_sub), + ) + + +class 
TestEndpointKey: + def test_with_subdir(self) -> None: + ep = SyncEndpoint(volume="v", subdir="photos") + assert endpoint_key(ep) == ("v", "photos") + + def test_without_subdir(self) -> None: + ep = SyncEndpoint(volume="v") + assert endpoint_key(ep) == ("v", None) + + +class TestSortSyncs: + def test_independent_syncs(self) -> None: + syncs = { + "a": _sync("a", "v1", "v2"), + "b": _sync("b", "v3", "v4"), + } + result = sort_syncs(syncs) + assert set(result) == {"a", "b"} + + def test_dependent_syncs_sorted(self) -> None: + # a writes to (usb, None), b reads from (usb, None) + syncs = { + "b": _sync("b", "usb", "nas"), + "a": _sync("a", "laptop", "usb"), + } + result = sort_syncs(syncs) + assert result.index("a") < result.index("b") + + def test_dependent_syncs_with_subdir(self) -> None: + # a writes to (usb, photos), b reads from (usb, photos) + syncs = { + "b": _sync("b", "usb", "nas", src_sub="photos"), + "a": _sync("a", "laptop", "usb", dst_sub="photos"), + } + result = sort_syncs(syncs) + assert result.index("a") < result.index("b") + + def test_no_dependency_different_subdir(self) -> None: + # a writes to (usb, photos), b reads from (usb, music) + # No dependency since subdirs differ + syncs = { + "a": _sync("a", "laptop", "usb", dst_sub="photos"), + "b": _sync("b", "usb", "nas", src_sub="music"), + } + result = sort_syncs(syncs) + assert set(result) == {"a", "b"} + + def test_chain_dependency(self) -> None: + # a -> b -> c + syncs = { + "c": _sync("c", "v2", "v3"), + "a": _sync("a", "v0", "v1"), + "b": _sync("b", "v1", "v2"), + } + result = sort_syncs(syncs) + assert result.index("a") < result.index("b") + assert result.index("b") < result.index("c") + + def test_cycle_raises_config_error(self) -> None: + # a writes to v1, b reads from v1 and writes to v2, + # a reads from v2 → cycle + syncs = { + "a": _sync("a", "v2", "v1"), + "b": _sync("b", "v1", "v2"), + } + with pytest.raises(ConfigError, match="Cyclic"): + sort_syncs(syncs) + + def 
test_empty_syncs(self) -> None: + assert sort_syncs({}) == [] + + def test_single_sync(self) -> None: + syncs = {"a": _sync("a", "v1", "v2")} + assert sort_syncs(syncs) == ["a"] + + def test_self_loop_ignored(self) -> None: + # a reads and writes to same volume — not a cycle + syncs = { + "a": _sync("a", "v1", "v1"), + } + assert sort_syncs(syncs) == ["a"]