diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000..95e1b01b --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,28 @@ +FROM almalinux:9 + +# Install basic dependencies +RUN dnf install -y --allowerasing \ + git \ + curl \ + wget \ + which \ + tar \ + gzip \ + bzip2 \ + ca-certificates \ + && dnf clean all + +# Install OpenCode +RUN curl -fsSL https://opencode.ai/install | bash + +# Install Pixi +RUN curl -fsSL https://pixi.sh/install.sh | sh + +# Add Pixi to PATH +ENV PATH="/root/.pixi/bin:${PATH}" + +# Set working directory +WORKDIR /workspace + +# Verify installations +RUN pixi --version || echo "Pixi installation will be verified on container start" diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..80aa812b --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,20 @@ +{ + "name": "Blop Dev Container", + "build": { + "dockerfile": "Dockerfile" + }, + "mounts": [ + "source=${localEnv:HOME}/.cache/rattler,target=/root/.cache/rattler,type=bind,consistency=cached" + ], + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "ms-toolsai.jupyter" + ] + } + }, + "postCreateCommand": "pixi info", + "remoteUser": "root" +} diff --git a/.github/workflows/README.md b/.github/workflows/README.md index f2d12c3e..8626b7c8 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1,6 +1,5 @@ # GitHub Actions CI Overview - ## Workflow ### `ci.yml` @@ -21,7 +20,6 @@ If one of these conditions are met, than the following occurs: This structure avoids duplicate CI jobs when both a PR and a branch push happen. - ### `_check.yml` Used to see if the current branch (on `push`) is already part of an open pull request. 
diff --git a/.github/workflows/_docs.yml b/.github/workflows/_docs.yml index 094bd1cf..de08e19c 100644 --- a/.github/workflows/_docs.yml +++ b/.github/workflows/_docs.yml @@ -6,12 +6,9 @@ on: deploy_key: required: false - jobs: build_docs: runs-on: ubuntu-latest - strategy: - fail-fast: false defaults: run: @@ -28,19 +25,10 @@ jobs: with: fetch-depth: 0 - - name: Install documentation-building requirements with apt/dpkg - run: | - set -vxeuo pipefail - wget --progress=dot:giga "https://github.com/jgm/pandoc/releases/download/3.1.6.1/pandoc-3.1.6.1-1-amd64.deb" -O /tmp/pandoc.deb - sudo dpkg -i /tmp/pandoc.deb - # conda install -c conda-forge -y pandoc - which pandoc - pandoc --version - - name: Install pixi and activate environment - uses: prefix-dev/setup-pixi@v0.8.14 + uses: prefix-dev/setup-pixi@82d477f15f3a381dbcc8adc1206ce643fe110fb7 # v0.9.3 with: - pixi-version: v0.46.0 + pixi-version: v0.60.0 environments: docs cache: false activate-environment: docs @@ -51,13 +39,6 @@ jobs: - name: Build Docs run: pixi run build-docs - - name: Upload JupyterLite docs artifact - uses: actions/upload-artifact@v4 - with: - name: jupyterlite-docs - path: docs/build/jupyter_execute - if-no-files-found: error - - name: Upload HTML docs artifact uses: actions/upload-artifact@v4 with: @@ -65,7 +46,6 @@ jobs: path: docs/build/html/ - name: Deploy documentation to nsls-ii.github.io - # if: github.repository_owner == 'NSLS-II' && github.ref_name == 'main' if: github.event_name == 'release' # We pin to the SHA, not the tag, for security reasons. 
# https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/security-hardening-for-github-actions#using-third-party-actions diff --git a/.github/workflows/_testing.yml b/.github/workflows/_testing.yml index ba3f2aac..0be313f0 100644 --- a/.github/workflows/_testing.yml +++ b/.github/workflows/_testing.yml @@ -35,7 +35,17 @@ jobs: activate-environment: ${{ matrix.python-version }} - name: Run unit tests - run: pixi run unit-tests + run: | + if [ "${{ matrix.python-version }}" = "py313-cpu" ]; then + pixi run -e ${{ matrix.python-version }} pytest --cov=blop --cov-report=xml --cov-report=term src/blop/tests + else + pixi run tests + fi - - name: Run integration tests - run: pixi run integration-tests + - name: Upload coverage to Codecov + if: matrix.python-version == 'py313-cpu' + uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4 + with: + files: ./coverage.xml + fail_ci_if_error: false + verbose: true diff --git a/.opencode/skills/blop-overview/SKILL.md b/.opencode/skills/blop-overview/SKILL.md new file mode 100644 index 00000000..f3117a78 --- /dev/null +++ b/.opencode/skills/blop-overview/SKILL.md @@ -0,0 +1,36 @@ +--- +name: blop-overview +description: Overview of the Blop project structure, workflow, and use-cases +compatibility: opencode +--- + +## What is Blop? + +Blop (Beamline Optimization Package) is a Python library for Bayesian optimization of experimental systems, particularly beamline experiments at synchrotron facilities. It bridges optimization algorithms (using Ax platform, BoTorch, GPyTorch) with the Bluesky ecosystem for data acquisition and device control, enabling efficient exploration of expensive-to-evaluate parameter spaces. 
+ +## Project Structure + +- **`src/blop/ax/`** - Core Agent implementation and Ax platform integration (DOFs, objectives, constraints) +- **`src/blop/bayesian/`** - Bayesian optimization components and models +- **`src/blop/plans.py`** - Bluesky plans for running experiments and optimization iterations +- **`docs/`** - Comprehensive Sphinx documentation with tutorials, how-tos, and explanations + +## Basic Workflow + +1. **Define DOFs** - Specify degrees of freedom (parameters to optimize) using `RangeDOF` or `ChoiceDOF` +1. **Define Objectives** - Specify what to maximize or minimize with `Objective` or `ScalarizedObjective` +1. **Add Constraints** (optional) - Set boundaries with `DOFConstraint` or `OutcomeConstraint` +1. **Create Agent** - Instantiate the optimization agent with DOFs, objectives, and an evaluation function +1. **Run Optimization** - Execute iterations using Bluesky's RunEngine to collect data and update models +1. **Analyze Results** - Review health metrics, convergence, and Pareto frontiers for multi-objective problems + +## Common Use-Cases + +- **Beamline alignment** - Optimizing mirror positions, angles, and curvatures for X-ray focusing +- **Multi-objective optimization** - Balancing competing goals like maximizing intensity while minimizing beam spot size +- **Parameter tuning** - Finding optimal experimental settings when measurements are expensive or time-consuming +- **Automated calibration** - Systematic exploration of device parameters for optimal performance + +## Documentation + +The project has excellent documentation at `/docs/` with detailed tutorials and explanations. 
**Refer to the docs for specific implementation details, API references, and worked examples.** diff --git a/.opencode/skills/build-docs/SKILL.md b/.opencode/skills/build-docs/SKILL.md new file mode 100644 index 00000000..ea994153 --- /dev/null +++ b/.opencode/skills/build-docs/SKILL.md @@ -0,0 +1,27 @@ +--- +name: build-docs +description: Build and test Sphinx documentation for the blop project +compatibility: opencode +--- + +## What I do + +Build Sphinx documentation using pixi tasks. + +## Commands + +**Build HTML docs:** + +```bash +pixi run build-docs +``` + +**Run doctests:** + +```bash +pixi run doc-tests +``` + +## When to use me + +Use me when building or testing documentation. diff --git a/.opencode/skills/dev-workflow/SKILL.md b/.opencode/skills/dev-workflow/SKILL.md new file mode 100644 index 00000000..41c22f5d --- /dev/null +++ b/.opencode/skills/dev-workflow/SKILL.md @@ -0,0 +1,27 @@ +--- +name: dev-workflow +description: "Run development checks: tests, linting, formatting, and type checking for the blop project" +compatibility: opencode +--- + +## What I do + +Run fast development checks using pixi tasks. + +## Commands + +**Run tests:** + +```bash +pixi run tests +``` + +**Run all checks (lint, format, typecheck):** + +```bash +pixi run check +``` + +## When to use me + +Use me when running tests or checks during development. diff --git a/.opencode/skills/install-deps/SKILL.md b/.opencode/skills/install-deps/SKILL.md new file mode 100644 index 00000000..44d9fd66 --- /dev/null +++ b/.opencode/skills/install-deps/SKILL.md @@ -0,0 +1,25 @@ +--- +name: install-deps +description: Install and manage blop dependencies using pixi or pip +compatibility: opencode +--- + +## What I do + +Install project dependencies. + +## Commands + +**Using pixi:** + +```bash +pixi install +``` + +## Adding dependencies + +Add to `pyproject.toml` as the single source of truth. `pixi.toml` should install most dependencies via the `pyproject.toml` with few exceptions. 
+ +## When to use me + +Use me when setting up the project or syncing dependencies. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8fb9ed8e..34720475 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,3 +34,11 @@ repos: rev: 0.8.1 hooks: - id: nbstripout + + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.21 + hooks: + - id: mdformat + additional_dependencies: + - mdformat-ruff + - mdformat-frontmatter diff --git a/README.md b/README.md index ec93945d..2233a5a9 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,34 @@ # Blop - Beamline Optimization Package - -[![Testing](https://github.com/NSLS-II/blop/actions/workflows/ci.yml/badge.svg)](https://github.com/NSLS-II/blop/actions/workflows/ci.yml) +[![Testing](https://github.com/bluesky/blop/actions/workflows/ci.yml/badge.svg)](https://github.com/bluesky/blop/actions/workflows/ci.yml) +[![codecov](https://codecov.io/gh/bluesky/blop/branch/main/graph/badge.svg)](https://codecov.io/gh/bluesky/blop) [![PyPI](https://img.shields.io/pypi/v/blop.svg)](https://pypi.python.org/pypi/blop) [![Conda](https://img.shields.io/conda/vn/conda-forge/blop.svg)](https://anaconda.org/conda-forge/blop) -* Free software: 3-clause BSD license -* Documentation: . +- Free software: 3-clause BSD license +- Documentation: . + +## Installation + +```bash +pip install blop +``` + +or with conda: + +```bash +conda install -c conda-forge blop +``` + +## Development + +A devcontainer is available with OpenCode and Pixi pre-installed. To use it: + +```bash +./launch-devcontainer.sh +``` +Or open in VS Code and select "Reopen in Container". 
## Citation diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..46b74ff3 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,12 @@ +services: + devcontainer: + build: + context: .devcontainer + dockerfile: Dockerfile + volumes: + - .:/workspace + - ${HOME}/.cache/rattler:/root/.cache/rattler:cached + working_dir: /workspace + stdin_open: true + tty: true + command: /bin/bash diff --git a/docs/source/_includes/installation-code-snippets.rst b/docs/source/_includes/installation-code-snippets.rst new file mode 100644 index 00000000..d3900bd5 --- /dev/null +++ b/docs/source/_includes/installation-code-snippets.rst @@ -0,0 +1,24 @@ +.. snippet-pip-standard-start +.. code:: bash + + $ pip install blop +.. snippet-pip-standard-end + +.. snippet-conda-standard-start +.. code:: bash + + $ conda install -c conda-forge blop +.. snippet-conda-standard-end + +.. snippet-pip-cpu-start +.. code:: bash + + $ pip install uv + $ uv pip install blop[cpu] +.. snippet-pip-cpu-end + +.. snippet-conda-cpu-start +.. code:: bash + + $ conda install -c conda-forge blop pytorch cpuonly -c pytorch +.. 
snippet-conda-cpu-end diff --git a/docs/source/_static/css/fix-content-height.css b/docs/source/_static/css/fix-content-height.css new file mode 100644 index 00000000..cfe74918 --- /dev/null +++ b/docs/source/_static/css/fix-content-height.css @@ -0,0 +1,29 @@ +/* Let notebook Plotly outputs fill available space */ +.content { + overflow: hidden; + height: auto; + min-height: 0; + max-height: none; + width: 100%; + min-width: 0; + max-width: none; +} + +.content > * { + max-height: none; + width: 100%; + max-width: none; +} + +.content .js-plotly-plot, +.content .plotly-graph-div, +.content .plotly, +.content .plot-container { + width: 100% !important; + max-width: none; +} + +.card-box > * { + overflow: hidden; + left: 0; +} diff --git a/docs/source/_static/css/styles.css b/docs/source/_static/css/styles.css new file mode 100644 index 00000000..0203cb64 --- /dev/null +++ b/docs/source/_static/css/styles.css @@ -0,0 +1,867 @@ +/* RESET & BASE */ +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +/* Dark mode specific placeholder styling */ +html[data-theme="dark"] #search-input::placeholder { + color: var(--pst-color-text-muted); + opacity: 0.9; +} + +html[data-theme="dark"] #search-input:focus::placeholder { + opacity: 0.7; +} + +/* Fix header */ +.custom-fixed-header { + background: var(--pst-color-surface); + border-bottom: 1px solid var(--pst-color-border); + padding: 1rem 0; + position: fixed; + top: 0; + left: 0; + right: 0; + width: 100vw; + z-index: 1000; +} + +/* Prevent content hiding under fixed header */ +.bd-main { + margin-top: 90px; +} + +.bd-sidebar-primary { + background-color: var(--pst-color-background); + border-right: 1px solid var(--pst-color-border); + display: flex; + flex: 0 0 auto; + flex-direction: column; + font-size: var(--pst-sidebar-font-size-mobile); + gap: 1rem; + max-height: calc(100vh - var(--pst-header-height)); + overflow-y: auto; + padding: 2rem 1rem 1rem; + position: sticky; + top: var(--pst-header-height); + 
width: 15%; + max-width: 300px; +} + +.bd-main .bd-content .bd-article-container { + display: flex; + flex-direction: column; + justify-content: start; + max-width: none; + overflow-x: auto; + padding: 1rem; + width: 100%; +} + +.bd-page-width { + max-width: 100%; /* default is 88rem */ +} + +.custom-header-inner { + width: 100%; + padding: 0 2rem; + display: flex; + align-items: center; + justify-content: space-between; +} + +.custom-left { + display: flex; + align-items: center; + gap: 2rem; +} + +.custom-logo { + display: flex; + align-items: center; + gap: 0.5rem; + font-weight: 700; + font-size: 1.5rem; +} + +.custom-logo-icon { + width: 40px; + height: 40px; + background: var(--pst-color-primary); + border-radius: 8px; + display: flex; + align-items: center; + justify-content: center; + color: white; + font-weight: bold; +} + +.custom-logo a { + color: var(--pst-color-primary); + text-decoration: none; +} + +.custom-nav { + display: flex; + gap: 1.5rem; +} + +.custom-nav a { + color: var(--pst-color-text-base); + text-decoration: none; + font-weight: 500; + padding: 0.5rem 0; + border-bottom: 2px solid transparent; + transition: border-color 0.2s; +} + +.custom-nav a:hover { + border-color: var(--pst-color-primary); +} + +.custom-right { + display: flex; + align-items: center; + gap: 1rem; +} + +body { + font-family: var(--pst-font-family-sans-serif); + background-color: var(--pst-color-background); + color: var(--pst-color-text-base); + line-height: 1.6; +} + +/* Warning Banner and*/ +/* Light mode colors */ +:root { + --warning-bg: #ff3300; + --warning-text: white; + --warning-details-bg: #fef9f8; + --warning-details-text: #2d2d2d; + --warning-btn-bg: white; + --warning-btn-text: #ff3300; + --warning-btn-hover-bg: #f0f0f0; + --install-card-bg: #f0f0f0; /* Light mode color */ +} + +/* Dark mode colors */ +html[data-theme="dark"] { + --warning-bg: #74210d; + --warning-text: white; + --warning-details-bg: #1a1a1a; + --warning-details-text: #e0e0e0; + 
--warning-btn-bg: #2d2d2d; + --warning-btn-text: #ff6633; + --warning-btn-hover-bg: #3d3d3d; + --install-card-bg: #222; /* Dark mode color */ +} + +.warning-banner { + position: fixed; + top: 73px; + left: 0; + width: 100%; + background-color: var(--warning-bg); + color: var(--warning-text); + font-family: sans-serif; + z-index: 999; + box-shadow: 0 2px 5px rgba(0,0,0,0.2); + transition: all 0.01s ease; +} + +.warning-content { + max-width: 1200px; + margin: 0 auto; + padding: 15px 20px; +} + +.warning-main { + display: flex; + align-items: center; + justify-content: center; + gap: 15px; +} + +.warning-text { + font-size: 1rem; + font-weight: 500; +} + +.warning-expand-btn { + background-color: rgba(255, 255, 255, 0.2); + color: var(--warning-text); + border: 1px solid rgba(255, 255, 255, 0.4); + padding: 5px 15px; + border-radius: 4px; + cursor: pointer; + font-size: 0.875rem; + font-weight: 500; + transition: all 1s ease; +} + +.warning-expand-btn:hover { + background-color: rgba(255, 255, 255, 0.3); + border-color: rgba(255, 255, 255, 0.6); +} + +.warning-details { + display: flex; + max-height: 0; + overflow: hidden; + opacity: 0; + transition: all 0.01s ease; + margin-top: 0; +} + +.warning-details.expanded { + max-height: 100%; + opacity: 1; + margin-top: 15px; +} + +.warning-details-box { + display: flex; + flex-direction: column; + align-items: center; + border: 2px solid var(--warning-bg); + background-color: var(--warning-details-bg); + color: var(--warning-details-text); + border-radius: 8px; + padding: 20px; + text-align: left; + margin: 0 auto; + width: 100%; +} + +.warning-details-box p { + margin: 10px 0 5px 0; + font-size: 0.95rem; + color: var(--warning-details-text); + width: 100%; +} + +.warning-details-box p:last-of-type { + margin: 5px 0; + font-size: 0.9rem; + opacity: 0.85; +} + +.warning-close-btn { + background-color: var(--warning-btn-bg); + color: var(--warning-btn-text); + border: none; + padding: 8px 20px; + border-radius: 4px; + 
cursor: pointer; + font-size: 0.875rem; + font-weight: 600; + margin-top: 10px; + transition: all 0.3s ease; +} + +.warning-close-btn:hover { + background-color: var(--warning-btn-hover-bg); + transform: translateY(-1px); +} + +.warning-banner.hidden { + transform: translateY(-100%); + opacity: 0; + pointer-events: none; +} + +/* HERO SECTION */ +.hero { + text-align: center; + padding: 4rem 2rem; +} + +.hero h1 { + font-size: clamp(2.5rem, 5vw, 4rem); + color: var(--pst-color-text-base); +} + +.subtitle { + margin-top: 0.5rem; + font-size: 1.1rem; + color: var(--pst-color-text-muted); +} + +/* CONTENT LAYOUT */ +.content { + padding: 4rem 2rem; + max-width: 1100px; + margin: auto; +} + +.content h2 { + font-size: 2rem; + margin-bottom: 1.5rem; + color: var(--pst-color-text-base); +} + +.two-column { + display: grid; + gap: 2rem; +} + +@media (min-width: 768px) { + .two-column { + grid-template-columns: 1fr 1fr; + align-items: center; + } +} + +/* IMAGE/VISUAL PLACEHOLDERS */ +.image-placeholder { + background: var(--pst-color-surface); + border: 1px solid var(--pst-color-border); + border-radius: 12px; + height: 300px; + display: flex; + align-items: center; + justify-content: center; +} + +/* INSTALLATION TABS */ +.tabs-container { + margin: 20px 0; +} + +.tabs { + display: flex; + list-style: none; + padding: 0; + margin: 0; + border-bottom: 2px solid var(--pst-color-border); +} + +.tab-item { + padding: 12px 24px; + cursor: pointer; + color: var(--pst-color-text-muted); + font-weight: 600; + transition: all 0.2s; + margin-bottom: -2px; +} + +.tab-item.active { + color: var(--pst-color-primary); + border-bottom: 3px solid var(--pst-color-primary); +} + +/* CODE BLOCKS (Unified Styling) */ +pre, .command-block pre { + background-color: var(--pst-color-inline-code-background) !important; + color: var(--pst-color-text-base) !important; + border: 1px solid var(--pst-color-border) !important; + padding: 16px !important; + border-radius: 8px !important; + font-family: 
var(--pst-font-family-monospace); + font-size: 0.9rem; + overflow-x: auto; + margin: 1rem 0; +} + +.installation-box{ + background: #1e1e1e; + border: 2px solid #333; + border-radius: 8px; + color: var(--pst-color-text-base); + padding: 1rem; + position: relative; + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, monospace; + font-size: 0.9rem; + box-shadow: inset 0 2px 4px rgba(0,0,0,0.1) +} + +code { + font-family: var(--pst-font-family-monospace); + color: var(--pst-color-text-base); +} + +/* LEARN MORE GRID */ +.card-grid { + display: grid; + gap: 1.5rem; + margin-top: 2rem; +} + +@media (min-width: 600px) { .card-grid { grid-template-columns: repeat(2, 1fr); } } +@media (min-width: 900px) { .card-grid { grid-template-columns: repeat(4, 1fr); } } + +.card, .custom-card { + background-color: var(--pst-color-surface); + border: 1px solid var(--pst-color-border); + padding: 2rem; + border-radius: 16px; + font-weight: 600; + text-align: center; + text-decoration: none; + color: var(--pst-color-text-base); + transition: transform 0.2s, border-color 0.2s; + display: block; +} + +.card:hover, .custom-card:hover { + transform: translateY(-3px); + border-color: var(--pst-color-primary); + color: var(--pst-color-primary); +} + +/* REFERENCES / CITATION BOX */ +.citation-text { + background-color: var(--pst-color-surface); + border-left: 5px solid var(--pst-color-primary); + padding: 20px; + border-radius: 0 8px 8px 0; + color: var(--pst-color-text-base); + line-height: 1.6; + margin: 1.5rem 0; +} + +/* FOOTER */ +footer { + text-align: center; + padding: 2rem; + font-size: 0.9rem; + color: var(--pst-color-text-muted); +} + +/* SEARCH INPUT STYLING */ +#search-input::placeholder { + color: var(--pst-color-text-muted); + opacity: 0.8; +} + +#search-input:focus::placeholder { + opacity: 0.6; +} + +/* Learn More grid layout */ +.learn-more-grid { + display: grid; + gap: 1.5rem; + grid-template-columns: 1fr 1fr; + grid-template-rows: 1fr 1fr; +} + +@media 
(max-width: 768px) { + .learn-more-grid { + grid-template-columns: 1fr; + grid-template-rows: repeat(4, 1fr); + } +} + +/* Copy button feedback styles */ +.copy-btn { + position: absolute; + right: 0.5rem; + top: 50%; + transform: translateY(-50%); + background: #4CAF50; + color: white; + border: none; + border-radius: 4px; + padding: 0.25rem 0.5rem; + font-size: 0.75rem; + cursor: pointer; + font-weight: 500; +} + +.copy-btn.copied { + background: #2e7d32 !important; + transform: translateY(-50%) scale(0.95); + font-weight: bold; + display: inline-flex; + align-items: center; + justify-content: center; +} + +.copy-btn { + transition: all 0.2s ease; +} + +.copy-btn:hover { + transform: translateY(-50%) translateY(-1px); + opacity: 0.8; + box-shadow: 0 4px 8px rgba(0,0,0,0.2); +} + +/* SPHINX LAYOUT OVERRIDES (Keep these for full-width landing) */ +body.pagename-index .bd-sidebar { display: none !important; } +body.pagename-index .bd-toc { display: none !important; } +body.pagename-index .bd-main { grid-template-columns: 1fr !important; } +body.pagename-index .bd-content { max-width: 100% !important; } +body.pagename-index h1 { text-align: center; font-size: 4rem; margin-bottom: 0.5rem; } + +.index-subtitle { + text-align: center; + font-size: 1.25rem; + color: var(--pst-color-text-muted); + margin-bottom: 4rem; +} + +.index-about-grid { + display: grid; + grid-template-columns: 1.2fr 1fr; + gap: 3rem; + align-items: center; + margin-bottom: 4rem; +} + +@media (max-width: 768px) { + .index-about-grid, + .installation-grid, + .learn-more-grid { + grid-template-columns: 1fr; + } +} + +/* INDEX PAGE STYLES */ + +/* Header styles for index page */ +.index-header { + background: var(--pst-color-surface); + border-bottom: 1px solid var(--pst-color-border); + padding: 1rem 0; + position: fixed; + top: 0; + left: 0; + right: 0; + width: 100vw; + z-index: 1000; + margin: 0; +} + +.index-header-container { + width: 100%; + max-width: none; + margin: 0; + padding: 0 2rem; + 
display: flex; + align-items: center; + justify-content: space-between; +} + +.index-header-left { + display: flex; + align-items: center; + gap: 2rem; +} + +.index-logo { + display: flex; + align-items: center; + gap: 0.5rem; + font-weight: 700; + font-size: 1.5rem; + color: var(--pst-color-primary); + text-decoration: none; +} + +.index-logo-icon { + width: 40px; + height: 40px; + background: var(--pst-color-primary); + border-radius: 8px; + display: flex; + align-items: center; + justify-content: center; + color: white; + font-weight: bold; +} + +.index-logo a { + color: var(--pst-color-primary); + text-decoration: none; +} + +.index-nav { + display: flex; + gap: 1.5rem; +} + +.index-nav a { + color: var(--pst-color-text-base); + text-decoration: none; + font-weight: 500; + padding: 0.5rem 0; + border-bottom: 2px solid transparent; + transition: border-color 0.2s; +} + +.index-nav a:hover { + border-color: var(--pst-color-primary); +} + +.index-header-right { + display: flex; + align-items: center; + gap: 1rem; +} + +.index-searchbox { + margin: 0; +} + +.index-search-input { + padding: 0.5rem 1rem; + border: 1px solid var(--pst-color-border); + border-radius: 6px; + background: var(--pst-color-background); + color: var(--pst-color-text-base); + width: 200px; + font-size: 0.875rem; +} + +.index-theme-btn { + border: 1px solid var(--pst-color-border); + background: var(--pst-color-background); + color: var(--pst-color-text-base); + padding: 0.5rem; + border-radius: 6px; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + transition: all 0.2s; +} + +.index-theme-btn:hover { + background: var(--pst-color-surface); +} + +.index-theme-icon { + font-size: 1rem; +} + +/* Main landing content */ +.landing { + max-width: 1100px; + margin: auto; + padding: 1.5rem 1.5rem 3rem; + padding-top: 9rem; + color: var(--pst-color-text-base); + background-color: var(--pst-color-background); +} + +/* Hero section */ +.hero-section { + text-align: 
center; + margin-bottom: 4rem; +} + +.hero-title { + font-size: 4.0rem; + margin-bottom: 0.5rem; + color: var(--pst-color-text-base); +} + +.hero-subtitle { + font-size: 1.25rem; + color: var(--pst-color-text-muted); +} + +/* What is Blop section */ +.about-section { + display: grid; + grid-template-columns: 1.2fr 1fr; + gap: 3rem; + align-items: center; + margin-bottom: 4rem; +} + +.about-heading { + color: var(--pst-color-text-base); +} + +.about-viz { + background: var(--pst-color-surface); + border: 1px solid var(--pst-color-border); + border-radius: 16px; + padding: 2rem; + text-align: center; +} + +.about-viz-inner { + height: 220px; + background: var(--pst-color-background); + border: 1px solid var(--pst-color-border); + border-radius: 12px; + display: flex; + align-items: center; + justify-content: center; +} + +.about-viz-placeholder { + color: var(--pst-color-text-muted); +} + +/* Section divider */ +.section-divider { + border: 0; + border-top: 1px solid var(--pst-color-border); +} + +/* Installation section */ +.installation-section { + margin: 4rem 0; + padding: 0 2rem; +} + +.installation-heading { + text-align: center; + color: var(--pst-color-text-base); + margin-bottom: 1rem; +} + +.pytorch-notice { + background: var(--pst-color-surface); + border: 1px solid var(--pst-color-border); + border-left: 4px solid var(--pst-color-primary); + border-radius: 8px; + padding: 1rem; + margin-bottom: 1.5rem; + color: var(--pst-color-text-base); + text-align: left; +} + +.pytorch-notice-title { + margin: 0 0 0.75rem 0; + font-size: 1.1rem; + font-weight: 600; + text-align: center; + color: var(--pst-color-primary); +} + +.pytorch-notice-text { + margin: 0 0 0.5rem 0; + font-size: 0.95rem; + line-height: 1.4; +} + +.pytorch-notice code { + background: var(--pst-color-inline-code-background); + padding: 0.2rem 0.4rem; + border-radius: 4px; + font-weight: 600; + color: var(--pst-color-text-base); +} + +.pytorch-notice-note { + color: var(--pst-color-text-muted); + 
margin: 0; + font-size: 0.85rem; + font-style: italic; +} + +.pytorch-notice-note a { + color: var(--pst-color-primary); +} + +.installation-grid { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 1.5rem; + margin-bottom: 1rem; +} + +.install-card { + background: var(--install-card-bg, white); + border: 1px solid var(--install-card-border, rgb(178, 175, 175)); + border-radius: 12px; + padding: 1rem; +} + +.install-card-title { + margin-top: 0; + font-size: 1.1rem; +} + +.install-label { + background: var(--install-card-bg, white); + margin-bottom: 0.25rem; + font-size: 0.85rem; +} + + +.install-box-single { + margin-bottom: 0.75rem; +} + +.install-box-wrapper { + position: relative; + display: flex; + align-items: center; +} + +.install-box-multi { + position: relative; + display: flex; + flex-direction: column; + gap: 0.25rem; +} + +.install-code-block { + display: block; + width: 100%; + padding-right: 4.5rem; +} + +.code-prompt { + color: #4CAF50; +} + +.code-command { + color: #E0E0E0; +} + +/* Learn More section */ +.learn-more-section { + margin: 4rem 0; +} + +.learn-more-heading { + text-align: center; + color: var(--pst-color-text-base); + margin-bottom: 2rem; +} + +.learn-more-card-link { + background: var(--pst-color-surface); + border: 1px solid var(--pst-color-border); + border-radius: 16px; + padding: 2rem; + text-align: center; + font-weight: 600; +} + +.learn-more-card-link a { + color: var(--pst-color-text-base); + text-decoration: none; + display: block; +} + +.learn-more-card-desc { + background: var(--pst-color-background); + border-radius: 8px; + padding: 1rem; + margin-top: 0.75rem; + text-align: center; + font-size: 0.875rem; + color: var(--pst-color-text-muted); +} + +.index-reference-box { + background-color: var(--pst-color-surface); + border-left: 5px solid var(--pst-color-primary); + padding: 20px; + font-size: 14px; + line-height: 1.6; + border-radius: 0 8px 8px 0; + margin: 1.5rem 0; +} diff --git 
a/docs/source/_static/fix-content-height.css b/docs/source/_static/fix-content-height.css deleted file mode 100644 index 010114b7..00000000 --- a/docs/source/_static/fix-content-height.css +++ /dev/null @@ -1,15 +0,0 @@ -/* Override the .content height constraint that limits plotly plot display */ - -/* MyST-NB can't determine the height of the interactive plots, so it defaults to 500px - * which is too small - */ - .content { - height: auto !important; - min-height: 500px; /* Keep a minimum height for layout purposes */ - max-height: none !important; -} - -/* Ensure content can expand as needed for interactive plots */ -.content > * { - max-height: none !important; -} diff --git a/docs/source/_static/javascript/javascript.js b/docs/source/_static/javascript/javascript.js new file mode 100644 index 00000000..efe0d282 --- /dev/null +++ b/docs/source/_static/javascript/javascript.js @@ -0,0 +1,162 @@ +document.addEventListener("DOMContentLoaded", () => { + // Existing tab functionality + document.querySelectorAll('.tab-item').forEach(tab => { + tab.addEventListener('click', () => { + const container = tab.closest('.tabs-container'); + const targetTab = tab.getAttribute('data-tab'); + + // Remove active class from all tabs and panels + container.querySelectorAll('.tab-item') + .forEach(t => t.classList.remove('active')); + container.querySelectorAll('.tab-panel') + .forEach(p => p.classList.remove('active')); + + // Activate selected tab + tab.classList.add('active'); + document.getElementById(targetTab).classList.add('active'); + }); + }); + + // Header search functionality + const searchInput = document.getElementById('search-input'); + if (searchInput) { + searchInput.addEventListener('keypress', function(e) { + if (e.key === 'Enter') { + e.preventDefault(); + const query = this.value.trim(); + if (query) { + // Navigate to search page with query + const searchUrl = new URL('search.html', window.location.origin + window.location.pathname); + 
searchUrl.searchParams.set('q', query); + window.location.href = searchUrl.toString(); + } + } + }); + } + + // Header theme toggle functionality + const themeButton = document.querySelector('.theme-switch-button'); + const themeIcon = document.getElementById('theme-icon'); + + if (themeButton && themeIcon) { + + // Function to update icon based on current theme + function updateThemeIcon() { + const currentTheme = document.documentElement.dataset.theme || 'auto'; + const isDark = currentTheme === 'dark' || + (currentTheme === 'auto' && window.matchMedia('(prefers-color-scheme: dark)').matches); + + themeIcon.className = isDark ? 'fa fa-moon' : 'fa fa-sun'; + } + + // Initial icon update + updateThemeIcon(); + + // Theme toggle click handler + themeButton.addEventListener('click', function() { + const currentTheme = document.documentElement.dataset.theme || 'auto'; + let newTheme; + + if (currentTheme === 'auto' || currentTheme === 'light') { + newTheme = 'dark'; + } else { + newTheme = 'light'; + } + + // Update theme + document.documentElement.dataset.theme = newTheme; + localStorage.setItem('theme', newTheme); + + // Update icon + updateThemeIcon(); + }); + + // Listen for system theme changes when in auto mode + window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', updateThemeIcon); + } +}); + +function toggleWarningDetails() { + const details = document.getElementById('warningDetails'); + const btn = document.getElementById('expandBtn'); + const banner = document.getElementById('warningBanner'); + const mainContent = document.querySelector('.landing'); + + if (details.classList.contains('expanded')) { + details.classList.remove('expanded'); + btn.textContent = 'Learn More'; + + // Reset padding to default when collapsed + if (mainContent) { + mainContent.style.transition = 'padding-top 0.3s ease'; + mainContent.style.paddingTop = '9rem'; + } + } else { + details.classList.add('expanded'); + btn.textContent = 'Show Less'; + + // Wait for 
expansion animation, then adjust padding based on banner height + setTimeout(() => { + if (banner && mainContent) { + const bannerHeight = banner.offsetHeight; + const headerHeight = 73; + const totalOffset = bannerHeight + headerHeight; + mainContent.style.transition = 'padding-top 0.2s ease'; + mainContent.style.paddingTop = `${totalOffset + 24}px`; + } + }, 50); + } +} + +function closeWarningBanner() { + const banner = document.getElementById('warningBanner'); + const mainContent = document.querySelector('.landing'); + + banner.classList.add('hidden'); + + // Adjust main content padding when banner is closed + setTimeout(() => { + if (mainContent) { + mainContent.style.transition = 'padding-top 0.3s ease'; + mainContent.style.paddingTop = '6rem'; + } + }, 300); +} + +function copyCode(btn) { + const text = btn.parentElement.querySelector(".code-block").innerText; + + if (navigator.clipboard && window.isSecureContext) { + navigator.clipboard.writeText(text) + .catch(() => fallbackCopy(text)); + } else { + fallbackCopy(text); + } +} + +function fallbackCopy(text) { + const textArea = document.createElement("textarea"); + textArea.value = text; + + textArea.style.position = "fixed"; + textArea.style.left = "-9999px"; + + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + + try { + document.execCommand("copy"); + } catch (err) { + console.error("Fallback: Unable to copy", err); + } + + document.body.removeChild(textArea); +} + +// Add click event listeners to all copy buttons +document.addEventListener('DOMContentLoaded', () => { + document.querySelectorAll('.copy-btn').forEach(button => { + button.addEventListener('click', () => copyCode(button)); + }); +}); diff --git a/docs/source/_templates/sections/header.html b/docs/source/_templates/sections/header.html new file mode 100644 index 00000000..487240d8 --- /dev/null +++ b/docs/source/_templates/sections/header.html @@ -0,0 +1,53 @@ +
+
+ +
+ + + + + +
+ +
+ + + + + + {% include "components/search-button.html" %} + + + {% include "components/theme-switcher.html" %} + +
+ +
+
diff --git a/docs/source/conf.py b/docs/source/conf.py index 1756eed1..d45fefbb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -144,7 +144,12 @@ # further. For a list of options available for each theme, see the # documentation. # -# html_theme_options = {} +html_theme_options = { + # "navbar_start": ["navbar-logo"], + # "navbar_center": ["navbar-nav"], + # "navbar_end": [], + "use_edit_page_button": True, # enables the edit button on regular pages +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -168,9 +173,19 @@ # Add custom CSS to fix .content height constraint for plotly plots html_css_files = [ - "fix-content-height.css", + "css/fix-content-height.css", + "css/styles.css", ] +html_context = { + "github_user": "bluesky", + "github_repo": "blop", + "github_version": "main", + "doc_path": "docs", +} + +html_show_sourcelink = False + # -- Options for LaTeX output --------------------------------------------- @@ -239,7 +254,7 @@ intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "numpy": ("https://numpy.org/doc/stable/", None), - "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), + # "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), "matplotlib": ("https://matplotlib.org/stable", None), "bluesky": ("https://blueskyproject.io/bluesky/main", None), "ophyd-async": ("https://blueskyproject.io/ophyd-async/main", None), diff --git a/docs/source/how-to-guides.rst b/docs/source/how-to-guides.rst index dae99aa4..da738adb 100644 --- a/docs/source/how-to-guides.rst +++ b/docs/source/how-to-guides.rst @@ -7,6 +7,7 @@ How-to Guides how-to-guides/use-ophyd-devices.rst how-to-guides/attach-data-to-experiments.rst how-to-guides/custom-generation-strategies.rst + how-to-guides/manual-suggestions.rst how-to-guides/set-dof-constraints.rst how-to-guides/set-outcome-constraints.rst 
how-to-guides/acquire-baseline.rst diff --git a/docs/source/how-to-guides/acquire-baseline.rst b/docs/source/how-to-guides/acquire-baseline.rst index bdb6e451..ae7efd75 100644 --- a/docs/source/how-to-guides/acquire-baseline.rst +++ b/docs/source/how-to-guides/acquire-baseline.rst @@ -6,7 +6,7 @@ from bluesky.protocols import NamedMovable, Readable, Status, Hints, HasHints, HasParent from bluesky.run_engine import RunEngine - from bluesky.callbacks.tiled_writer import TiledWriter + from bluesky_tiled_plugins import TiledWriter from tiled.client import from_uri from tiled.server import SimpleTiledServer diff --git a/docs/source/how-to-guides/custom-generation-strategies.rst b/docs/source/how-to-guides/custom-generation-strategies.rst index 854171ad..e9b7ce43 100644 --- a/docs/source/how-to-guides/custom-generation-strategies.rst +++ b/docs/source/how-to-guides/custom-generation-strategies.rst @@ -6,7 +6,7 @@ from bluesky.protocols import NamedMovable, Readable, Status, Hints, HasHints, HasParent from bluesky.run_engine import RunEngine - from bluesky.callbacks.tiled_writer import TiledWriter + from bluesky_tiled_plugins import TiledWriter from tiled.client import from_uri from tiled.server import SimpleTiledServer diff --git a/docs/source/how-to-guides/manual-suggestions.rst b/docs/source/how-to-guides/manual-suggestions.rst new file mode 100644 index 00000000..33db0b86 --- /dev/null +++ b/docs/source/how-to-guides/manual-suggestions.rst @@ -0,0 +1,253 @@ +.. 
testsetup:: + + from unittest.mock import MagicMock + from typing import Any + import time + + from bluesky.protocols import NamedMovable, Readable, Status, Hints, HasHints, HasParent + from bluesky.run_engine import RunEngine + from tiled.client.container import Container + + class AlwaysSuccessfulStatus(Status): + def add_callback(self, callback) -> None: + callback(self) + + def exception(self, timeout = 0.0): + return None + + @property + def done(self) -> bool: + return True + + @property + def success(self) -> bool: + return True + + class ReadableSignal(Readable, HasHints, HasParent): + def __init__(self, name: str) -> None: + self._name = name + self._value = 0.0 + + @property + def name(self) -> str: + return self._name + + @property + def hints(self) -> Hints: + return { + "fields": [self._name], + "dimensions": [], + "gridding": "rectilinear", + } + + @property + def parent(self) -> Any | None: + return None + + def read(self): + return { + self._name: { "value": self._value, "timestamp": time.time() } + } + + def describe(self): + return { + self._name: { "source": self._name, "dtype": "number", "shape": [] } + } + + class MovableSignal(ReadableSignal, NamedMovable): + def __init__(self, name: str, initial_value: float = 0.0) -> None: + super().__init__(name) + self._value: float = initial_value + + def set(self, value: float) -> Status: + self._value = value + return AlwaysSuccessfulStatus() + + db = MagicMock(spec=Container) + RE = RunEngine({}) + + sensor = ReadableSignal("signal") + motor_x = MovableSignal("motor_x") + motor_y = MovableSignal("motor_y") + + # Mock evaluation function for examples + def evaluation_function(uid: str, suggestions: list[dict]) -> list[dict]: + """Mock evaluation function that returns constant outcomes.""" + outcomes = [] + for suggestion in suggestions: + outcome = { + "_id": suggestion["_id"], + "signal": 0.5, + } + outcomes.append(outcome) + return outcomes + +Manual Point Injection +====================== + +This 
guide shows how to inject custom parameter combinations based on domain knowledge or external sources, alongside optimizer-driven suggestions. + +Basic Usage +----------- + +To evaluate manually-specified points, use the ``sample_suggestions`` method with parameter combinations (without ``"_id"`` keys). The optimizer will automatically register these trials and incorporate the results into the Bayesian model. + +.. testcode:: + + from blop.ax import Agent, RangeDOF, Objective + + # Configure agent + agent = Agent( + sensors=[sensor], + dofs=[ + RangeDOF(actuator=motor_x, bounds=(-10, 10), parameter_type="float"), + RangeDOF(actuator=motor_y, bounds=(-10, 10), parameter_type="float"), + ], + objectives=[Objective(name="signal", minimize=False)], + evaluation_function=evaluation_function, + ) + + # Define points of interest + manual_points = [ + {'motor_x': 0.5, 'motor_y': 1.0}, # Center region + {'motor_x': 0.0, 'motor_y': 0.0}, # Origin + ] + + # Evaluate them + RE(agent.sample_suggestions(manual_points)) + +.. testoutput:: + :hide: + + ... + +The manual points will be treated just like optimizer suggestions - they'll be tracked, evaluated, and used to improve the model. + +Mixed Workflows +--------------- + +You can combine optimizer suggestions with manual points throughout your optimization: + +.. testcode:: + + from blop.ax import Agent, RangeDOF, Objective + + agent = Agent( + sensors=[sensor], + dofs=[ + RangeDOF(actuator=motor_x, bounds=(-10, 10), parameter_type="float"), + RangeDOF(actuator=motor_y, bounds=(-10, 10), parameter_type="float"), + ], + objectives=[Objective(name="signal", minimize=False)], + evaluation_function=evaluation_function, + ) + + # Run optimizer for initial exploration + RE(agent.optimize(iterations=3)) + + # Try a manual point based on domain insight + manual_point = [{'motor_x': 0.75, 'motor_y': 0.25}] + RE(agent.sample_suggestions(manual_point)) + + # Continue optimization + RE(agent.optimize(iterations=3)) + +.. 
testoutput:: + :hide: + + ... + +The optimizer will incorporate your manual point into its model and use it to inform future suggestions. + +Manual Approval Workflow +------------------------- + +You can review optimizer suggestions before running them by using ``suggest()`` to get suggestions without acquiring data: + +.. testcode:: + + from blop.ax import Agent, RangeDOF, Objective + + agent = Agent( + sensors=[sensor], + dofs=[ + RangeDOF(actuator=motor_x, bounds=(-10, 10), parameter_type="float"), + RangeDOF(actuator=motor_y, bounds=(-10, 10), parameter_type="float"), + ], + objectives=[Objective(name="signal", minimize=False)], + evaluation_function=evaluation_function, + ) + + # Get suggestions without running + suggestions = agent.suggest(num_points=5) + + # Review and filter + print("Reviewing suggestions:") + for s in suggestions: + trial_id = s['_id'] + x = s['motor_x'] + y = s['motor_y'] + print(f" Trial {trial_id}: x={x:.2f}, y={y:.2f}") + + # Only run approved suggestions + approved = [s for s in suggestions if s['motor_x'] > -5.0] + + if approved: + RE(agent.sample_suggestions(approved)) + else: + print("No suggestions approved") + +.. testoutput:: + + Reviewing suggestions: + ... + +This workflow allows you to apply safety checks, domain constraints, or other validation before running trials. + +Iterative Refinement +-------------------- + +A common pattern is to alternate between automated optimization and targeted manual exploration: + +.. 
testcode:: + + from blop.ax import Agent, RangeDOF, Objective + + agent = Agent( + sensors=[sensor], + dofs=[ + RangeDOF(actuator=motor_x, bounds=(-10, 10), parameter_type="float"), + RangeDOF(actuator=motor_y, bounds=(-10, 10), parameter_type="float"), + ], + objectives=[Objective(name="signal", minimize=False)], + evaluation_function=evaluation_function, + ) + + for cycle in range(3): + # Automated exploration + RE(agent.optimize(iterations=2, n_points=2)) + + # Review results and manually probe interesting regions + # (Look at plots, current best, etc.) + + # Try edge cases or special points + if cycle == 1: + # After first cycle, check boundaries + boundary_points = [ + {'motor_x': -10.0, 'motor_y': 0.0}, + {'motor_x': 10.0, 'motor_y': 0.0}, + ] + RE(agent.sample_suggestions(boundary_points)) + +.. testoutput:: + :hide: + + ... + +See Also +-------- + +- :meth:`blop.ax.Agent.suggest` - Get optimizer suggestions without running +- :meth:`blop.ax.Agent.sample_suggestions` - Evaluate specific suggestions +- :meth:`blop.ax.Agent.optimize` - Run full optimization loop +- :class:`blop.protocols.CanRegisterSuggestions` - Protocol for manual trial support diff --git a/docs/source/how-to-guides/use-tiled.rst b/docs/source/how-to-guides/use-tiled.rst index 5a53cc10..6797cc23 100644 --- a/docs/source/how-to-guides/use-tiled.rst +++ b/docs/source/how-to-guides/use-tiled.rst @@ -75,7 +75,7 @@ To access the data for optimization, you have to connect to a Tiled server insta .. testcode:: from bluesky.run_engine import RunEngine - from bluesky.callbacks.tiled_writer import TiledWriter + from bluesky_tiled_plugins import TiledWriter from tiled.client import from_uri from tiled.server import SimpleTiledServer diff --git a/docs/source/index.rst b/docs/source/index.rst index f00b53a4..57c7f060 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,18 +1,5 @@ -What is Blop? 
-------------- - -Blop is a Python library for performing optimization for beamline experiments. It is designed to integrate nicely with the Bluesky ecosystem and primarily acts as a bridge between optimization routines and fine-grained data acquisition and control. Our goal is to provide a simple and practical data-driven optimization interface for beamline experiments. - - -Documentation structure ------------------------ - -- :doc:`installation` - Installation instructions -- :doc:`how-to-guides` - How-to guides for common tasks -- :doc:`explanation` - Explanation of the underlying concepts -- :doc:`tutorials` - Tutorials for learning -- :doc:`reference` - Reference documentation for the API -- :doc:`release-history` - Release history +Blop +==== .. toctree:: :maxdepth: 2 @@ -25,27 +12,182 @@ Documentation structure reference release-history -Citation --------- + +.. raw:: html + +

a BeamLine Optimization Package

+
+
+ +What is Blop? +------------- + +Blop is a Python library for performing optimization for beamline +experiments. It is designed to integrate nicely with the Bluesky +ecosystem and primarily targets rapid beamline data acquisition +and control. + +Our goal is to provide a simple and practical data-driven +optimization interface for beamline experiments. + +.. raw:: html + +
+
+
+ [Visualization Placeholder] +
+
+
+
+ +Installation +------------ + +.. raw:: html + +
+ +.. container:: install-card + + .. container:: install-card-title + + Via PyPI + + .. container:: install-label + + Standard (GPU support): + + .. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-pip-standard-start + :end-before: .. snippet-pip-standard-end + + .. container:: install-label + + CPU-only (containers, CI/CD, laptops): + + .. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-pip-cpu-start + :end-before: .. snippet-pip-cpu-end + +.. container:: install-card + + .. container:: install-card-title + + Via Conda-forge + + .. container:: install-label + + Standard: + + .. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-conda-standard-start + :end-before: .. snippet-conda-standard-end + + .. container:: install-label + + CPU-only: + + .. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-conda-cpu-start + :end-before: .. snippet-conda-cpu-end + + +.. raw:: html + +
+ +For additional installation instructions, refer to the :doc:`installation` guide. + +.. raw:: html + +
+ +Learn More! +----------- + +.. raw:: html + +
+ +.. container:: learn-more-item + + .. container:: learn-more-card-link + + :doc:`Tutorials ` + + .. container:: learn-more-card-desc + + Step-by-step guides to get started with Blop fundamentals and basic workflows. + +.. container:: learn-more-item + + .. container:: learn-more-card-link + + :doc:`How-To Guides ` + + .. container:: learn-more-card-desc + + Practical recipes and solutions for specific beamline optimization tasks. + +.. container:: learn-more-item + + .. container:: learn-more-card-link + + :doc:`References ` + + .. container:: learn-more-card-desc + + Complete API documentation, class references, and technical specifications. + +.. container:: learn-more-item + + .. container:: learn-more-card-link + + :doc:`Release History ` + + .. container:: learn-more-card-desc + + Version updates, new features, bug fixes, and changelog for the current release. + +.. raw:: html + +
+
+ +References +---------- If you use this package in your work, please cite the following paper: - Morris, T. W., Rakitin, M., Du, Y., Fedurin, M., Giles, A. C., Leshchev, D., Li, W. H., Romasky, B., Stavitski, E., Walter, A. L., Moeller, P., Nash, B., & Islegen-Wojdyla, A. (2024). A general Bayesian algorithm for the autonomous alignment of beamlines. Journal of Synchrotron Radiation, 31(6), 1446–1456. https://doi.org/10.1107/S1600577524008993 +.. raw:: html + +
+ +Morris, T. W., Rakitin, M., Du, Y., Fedurin, M., Giles, A. C., Leshchev, +D., Li, W. H., Romasky, B., Stavitski, E., Walter, A. L., Moeller, P., +Nash, B., & Islegen-Wojdyla, A. (2024). A general Bayesian algorithm for +the autonomous alignment of beamlines. *Journal of Synchrotron Radiation*, +31(6), 1446–1456. `https://doi.org/10.1107/S1600577524008993 `_ + +.. raw:: html + +
-BibTeX: +**BibTeX:** .. code-block:: bibtex - @Article{Morris2024, - author = {Morris, Thomas W. and Rakitin, Max and Du, Yonghua and Fedurin, Mikhail and Giles, Abigail C. and Leshchev, Denis and Li, William H. and Romasky, Brianna and Stavitski, Eli and Walter, Andrew L. and Moeller, Paul and Nash, Boaz and Islegen-Wojdyla, Antoine}, - journal = {Journal of Synchrotron Radiation}, - title = {{A general Bayesian algorithm for the autonomous alignment of beamlines}}, - year = {2024}, - month = {Nov}, - number = {6}, - pages = {1446--1456}, - volume = {31}, - doi = {10.1107/S1600577524008993}, - keywords = {Bayesian optimization, automated alignment, synchrotron radiation, digital twins, machine learning}, - url = {https://doi.org/10.1107/S1600577524008993}, - } + @Article{Morris2024, + author = {Morris, Thomas W. and Rakitin, Max and Du, Yonghua and Fedurin, Mikhail and Giles, Abigail C. and Leshchev, Denis and Li, William H. and Romasky, Brianna and Stavitski, Eli and Walter, Andrew L. and Moeller, Paul and Nash, Boaz and Islegen-Wojdyla, Antoine}, + journal = {Journal of Synchrotron Radiation}, + title = {A general Bayesian algorithm for the autonomous alignment of beamlines}, + year = {2024}, + month = {Nov}, + number = {6}, + pages = {1446--1456}, + volume = {31}, + doi = {10.1107/S1600577524008993}, + keywords = {Bayesian optimization, automated alignment, synchrotron radiation, digital twins, machine learning}, + url = {https://doi.org/10.1107/S1600577524008993}, + } diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 1613f722..b3b27981 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -11,15 +11,15 @@ The package works with Python 3.10+ and can be installed from both PyPI and/or c To install the package using the ``pip`` package manager, run the following command: -.. code:: bash - - $ pip install blop +.. include:: _includes/installation-code-snippets.rst + :start-after: .. 
snippet-pip-standard-start + :end-before: .. snippet-pip-standard-end To install the package using the ``conda`` package manager, run the following command: -.. code:: bash - - $ conda install -c conda-forge blop +.. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-conda-standard-start + :end-before: .. snippet-conda-standard-end PyTorch Acceleration Options ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -27,10 +27,9 @@ PyTorch Acceleration Options By default, ``blop`` installs PyTorch with GPU support (~7GB). For environments without GPU support, or to reduce installation size, you can install a CPU-only version (~900MB) using ``uv``: -.. code:: bash - - $ pip install uv - $ uv pip install blop[cpu] +.. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-pip-cpu-start + :end-before: .. snippet-pip-cpu-end This is particularly useful for: @@ -46,9 +45,10 @@ This is particularly useful for: For conda users who want CPU-only PyTorch: -.. code:: bash +.. include:: _includes/installation-code-snippets.rst + :start-after: .. snippet-conda-cpu-start + :end-before: .. snippet-conda-cpu-end - $ conda install -c conda-forge blop pytorch cpuonly -c pytorch Running the tutorials ^^^^^^^^^^^^^^^^^^^^^ @@ -69,6 +69,7 @@ Your third option is to simply convert the tutorials to ipynb format and use wha $ jupytext --to ipynb docs/source/tutorials/*.md + .. _for-developers: For developers diff --git a/docs/source/tutorials/README.md b/docs/source/tutorials/README.md index 4e5026ef..bc342850 100644 --- a/docs/source/tutorials/README.md +++ b/docs/source/tutorials/README.md @@ -4,7 +4,7 @@ This repository contains tutorials that are designed to be run using Jupyter Lab ## Running the Tutorials -To run and experiment with the tutorial code, you'll need to open them inside Jupyter Lab. To do this, run: +To run and experiment with the tutorial code, you'll need to open them inside Jupyter Lab. 
To do this, run: ```bash pixi run start-jupyter @@ -13,6 +13,7 @@ pixi run start-jupyter This will launch Jupyter Lab in your browser, where you can navigate to and run the tutorial notebooks. If you prefer to work with the tutorials in your local editor as standard notebooks, you can convert them back to the .ipynb format: + ```bash jupytext --to notebook ``` diff --git a/docs/source/tutorials/simple-experiment.md b/docs/source/tutorials/simple-experiment.md index 368e4e93..c0c0a764 100644 --- a/docs/source/tutorials/simple-experiment.md +++ b/docs/source/tutorials/simple-experiment.md @@ -28,7 +28,7 @@ from blop.ax import Agent, RangeDOF, Objective from bluesky.protocols import NamedMovable, Readable, Status, Hints, HasHints, HasParent from bluesky.run_engine import RunEngine -from bluesky.callbacks.tiled_writer import TiledWriter +from bluesky_tiled_plugins import TiledWriter from bluesky.callbacks.best_effort import BestEffortCallback from tiled.client import from_uri from tiled.client.container import Container diff --git a/docs/source/tutorials/xrt-kb-mirrors.md b/docs/source/tutorials/xrt-kb-mirrors.md index 61378e87..0acf8711 100644 --- a/docs/source/tutorials/xrt-kb-mirrors.md +++ b/docs/source/tutorials/xrt-kb-mirrors.md @@ -37,19 +37,28 @@ Before we can optimize, we need to set up the data infrastructure. 
Blop uses [Bl ```{code-cell} ipython3 import logging +from pathlib import PurePath +import cv2 +import numpy as np import matplotlib.pyplot as plt from tiled.client.container import Container from bluesky.callbacks import best_effort -from bluesky.callbacks.tiled_writer import TiledWriter +from bluesky_tiled_plugins import TiledWriter from bluesky.run_engine import RunEngine +from event_model import RunRouter from tiled.client import from_uri # type: ignore[import-untyped] from tiled.server import SimpleTiledServer +from ophyd_async.core import StaticPathProvider, UUIDFilenameProvider from blop.ax import Agent, RangeDOF, Objective -from blop.sim.xrt_beamline import TiledBeamline from blop.protocols import EvaluationFunction +# Import simulation devices (requires: pip install -e sim/) +from blop_sim.backends.xrt import XRTBackend +from blop_sim.devices.xrt import KBMirror +from blop_sim.devices import DetectorDevice + # Suppress noisy logs from httpx logging.getLogger("httpx").setLevel(logging.WARNING) @@ -65,11 +74,16 @@ Next, we create a local Tiled server. The `TiledWriter` callback will save exper tiled_server = SimpleTiledServer(readable_storage=[DETECTOR_STORAGE]) tiled_client = from_uri(tiled_server.uri) tiled_writer = TiledWriter(tiled_client) -bec = best_effort.BestEffortCallback() -bec.disable_plots() + +def bec_factory(name, doc): + bec = best_effort.BestEffortCallback() + bec.disable_plots() + return [bec], [] + +rr = RunRouter([bec_factory]) RE = RunEngine({}) -RE.subscribe(bec) +RE.subscribe(rr) RE.subscribe(tiled_writer) ``` @@ -85,14 +99,23 @@ VERTICAL_BOUNDS = (25000, 45000) # Optimal ~38000 is in upper portion HORIZONTAL_BOUNDS = (15000, 35000) # Optimal ~21000 is in lower portion ``` -Now we create the beamline and define our DOFs. Each `RangeDOF` wraps an actuator (something we can move) with bounds that constrain the search space: +Now we create the simulation backend and individual devices. 
Each `RangeDOF` wraps an actuator (something we can move) with bounds that constrain the search space: ```{code-cell} ipython3 -beamline = TiledBeamline(name="bl") +# Create XRT simulation backend +backend = XRTBackend() + +# Create individual KB mirror devices +kbv = KBMirror(backend, mirror_index=0, initial_radius=38000, name="kbv") +kbh = KBMirror(backend, mirror_index=1, initial_radius=21000, name="kbh") + +# Create detector device +det = DetectorDevice(backend, StaticPathProvider(UUIDFilenameProvider(), PurePath(DETECTOR_STORAGE)), name="det") +# Define DOFs using mirror radius signals dofs = [ - RangeDOF(actuator=beamline.kbv_dsv, bounds=VERTICAL_BOUNDS, parameter_type="float"), - RangeDOF(actuator=beamline.kbh_dsh, bounds=HORIZONTAL_BOUNDS, parameter_type="float"), + RangeDOF(actuator=kbv.radius, bounds=VERTICAL_BOUNDS, parameter_type="float"), + RangeDOF(actuator=kbh.radius, bounds=HORIZONTAL_BOUNDS, parameter_type="float"), ] ``` @@ -103,15 +126,16 @@ The `actuator` is the device that physically changes the parameter. The `bounds` **Objectives** specify what you want to optimize. Each objective has a name (matching a value your evaluation function will return) and a direction: `minimize=True` for things you want smaller, `minimize=False` for things you want larger. 
For our KB mirrors, we have three objectives: -- **Intensity** (`bl_det_sum`): We want *more* signal → `minimize=False` -- **Spot width X** (`bl_det_wid_x`): We want a *tighter* spot → `minimize=True` -- **Spot width Y** (`bl_det_wid_y`): We want a *tighter* spot → `minimize=True` + +- **Intensity** (`intensity`): We want *more* signal → `minimize=False` +- **Spot width** (`width`): We want a *tighter* spot → `minimize=True` +- **Spot height** (`height`): We want a *tighter* spot → `minimize=True` ```{code-cell} ipython3 objectives = [ - Objective(name="bl_det_sum", minimize=False), - Objective(name="bl_det_wid_x", minimize=True), - Objective(name="bl_det_wid_y", minimize=True), + Objective(name="intensity", minimize=False), + Objective(name="width", minimize=True), + Objective(name="height", minimize=True), ] ``` @@ -122,20 +146,63 @@ With multiple objectives that can conflict (maximizing intensity might increase The **evaluation function** is the bridge between raw experimental data and the optimizer. After each measurement, the optimizer needs to know how well that configuration performed. Your evaluation function: 1. Receives a run UID and the suggestions that were tested -2. Reads the relevant data from Tiled -3. Returns outcome values for each suggestion +1. Reads the beam images from Tiled +1. Computes statistics (intensity, width, centroid, etc.) from the images +1. 
Returns outcome values for each suggestion ```{code-cell} ipython3 class DetectorEvaluation(EvaluationFunction): def __init__(self, tiled_client: Container): self.tiled_client = tiled_client - + + def _compute_stats(self, image: np.array) -> tuple[str, str, str]: + """Compute integrated intensity and beam width/height from a beam image.""" + # Convert to grayscale + gray = image.squeeze() + if gray.ndim == 3: + gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY) + + # Convert data type for numerical stability + gray = gray.astype(np.float32) + + # Smooth w/ (5, 5) kernel and threshold + blurred = cv2.GaussianBlur(gray, (5, 5), 0) + max_val = np.max(blurred) + if max_val == 0: + return 0.0, 0.0, 0.0 + + thresh_value = 0.2 * max_val + _, thresh = cv2.threshold(blurred, thresh_value, 255, cv2.THRESH_TOZERO) + + # Total integrated intensity + total_intensity = np.sum(thresh) + + # Beam width/height from intensity-weighted second moment (σ) + total_weight = np.sum(thresh) + if total_weight <= 0: + return total_intensity, 0.0, 0.0 + + h, w = thresh.shape + y_coords = np.arange(h, dtype=np.float32) + x_coords = np.arange(w, dtype=np.float32) + + x_bar = np.sum(x_coords * np.sum(thresh, axis=0)) / total_weight + y_bar = np.sum(y_coords * np.sum(thresh, axis=1)) / total_weight + + x_var = np.sum((x_coords - x_bar) ** 2 * np.sum(thresh, axis=0)) / total_weight + y_var = np.sum((y_coords - y_bar) ** 2 * np.sum(thresh, axis=1)) / total_weight + + width = 2 * np.sqrt(x_var) # ~2σ width + height = 2 * np.sqrt(y_var) # ~2σ height + + return total_intensity, width, height + def __call__(self, uid: str, suggestions: list[dict]) -> list[dict]: outcomes = [] run = self.tiled_client[uid] - bl_det_sum = run["primary/bl_det_sum"].read() - bl_det_wid_x = run["primary/bl_det_wid_x"].read() - bl_det_wid_y = run["primary/bl_det_wid_y"].read() + + # Read beam images from detector + images = run["primary/det_image"].read() # Suggestions are stored in the start document's metadata when # using the 
`blop.plans.default_acquire` plan. @@ -143,22 +210,31 @@ class DetectorEvaluation(EvaluationFunction): # a custom acquisition plan. suggestion_ids = [suggestion["_id"] for suggestion in run.metadata["start"]["blop_suggestions"]] + # Compute statistics from each image for idx, sid in enumerate(suggestion_ids): + image = images[idx] + intensity, width, height = self._compute_stats(image) + outcome = { "_id": sid, - "bl_det_sum": bl_det_sum[idx], - "bl_det_wid_x": bl_det_wid_x[idx], - "bl_det_wid_y": bl_det_wid_y[idx], + "intensity": intensity, + "width": width, + "height": height, } outcomes.append(outcome) return outcomes ``` -Note the `_id` field—this links each outcome back to the suggestion that produced it. This is essential when multiple configurations are tested in a single run. +Note how we: + +1. Read the image data from the stored detector data +1. Use image processing techniques to compute beam metrics from the raw detector images +1. Link each outcome back to its suggestion via the `_id` field ## Creating and Running the Agent The **Agent** brings everything together. It: + - Uses DOFs to know what parameters to adjust - Uses objectives to know what to optimize - Calls the evaluation function to assess each configuration @@ -167,7 +243,7 @@ The **Agent** brings everything together. It: ```{code-cell} ipython3 agent = Agent( - sensors=[beamline.det], + sensors=[det], dofs=dofs, objectives=objectives, evaluation_function=DetectorEvaluation(tiled_client), @@ -177,7 +253,7 @@ agent = Agent( ) ``` -The `sensors` list contains any devices that produce data during acquisition. Here, `beamline.det` is our detector. +The `sensors` list contains any devices that produce data during acquisition. Here, `det` is our detector device. 
## Running the Optimization @@ -226,7 +302,7 @@ agent.ax_client.summarize() The `plot_objective` method shows how an objective varies across the DOF space, based on the surrogate model the agent built: ```{code-cell} ipython3 -_ = agent.plot_objective(x_dof_name="bl_kbh_dsh", y_dof_name="bl_kbv_dsv", objective_name="bl_det_sum") +_ = agent.plot_objective(x_dof_name="kbh-radius", y_dof_name="kbv-radius", objective_name="intensity") ``` This plot reveals the landscape the optimizer explored. Peaks (for maximization) or valleys (for minimization) show where good configurations lie. @@ -246,14 +322,14 @@ Now move the mirrors to these optimal positions and acquire an image: from bluesky.plans import list_scan uid = RE(list_scan( - [beamline.det], - beamline.kbv_dsv, [optimal_parameters[beamline.kbv_dsv.name]], - beamline.kbh_dsh, [optimal_parameters[beamline.kbh_dsh.name]], + [det], + kbv.radius, [optimal_parameters[kbv.radius.name]], + kbh.radius, [optimal_parameters[kbh.radius.name]], )) ``` ```{code-cell} ipython3 -image = tiled_client[uid[0]]["primary/bl_det_image"].read().squeeze() +image = tiled_client[uid[0]]["primary/det_image"].read().squeeze() plt.imshow(image) plt.colorbar() plt.show() @@ -264,12 +340,12 @@ plt.show() In this tutorial, you worked through a complete Bayesian optimization workflow: 1. **DOFs** define the search space—the parameters you can control and their allowed ranges -2. **Objectives** specify your goals and whether to minimize or maximize each one -3. **Evaluation functions** extract meaningful metrics from experimental data -4. **The Agent** coordinates everything, building a model of your system and intelligently exploring the parameter space -5. **Health checks** let you diagnose optimization progress and catch issues early +1. **Objectives** specify your goals and whether to minimize or maximize each one +1. **Evaluation functions** extract meaningful metrics from experimental data +1. 
**The Agent** coordinates everything, building a model of your system and intelligently exploring the parameter space +1. **Health checks** let you diagnose optimization progress and catch issues early -These same components apply to any optimization problem: swap the simulated beamline for real hardware, adjust the DOFs and objectives for your system, and write an evaluation function that extracts your metrics. +These same components apply to any optimization problem: swap the simulated devices for real hardware, adjust the DOFs and objectives for your system, and write an evaluation function that extracts your metrics. ## Next Steps @@ -277,6 +353,6 @@ These same components apply to any optimization problem: swap the simulated beam - Explore [DOF constraints](../how-to-guides/set-dof-constraints.rst) to encode physical limitations - See [outcome constraints](../how-to-guides/set-outcome-constraints.rst) to enforce requirements on your results -For the beamline setup code used in this tutorial, see: -- [xrt_beamline.py](https://github.com/NSLS-II/blop/blob/main/src/blop/sim/xrt_beamline.py) -- [xrt_kb_model.py](https://github.com/NSLS-II/blop/blob/main/src/blop/sim/xrt_kb_model.py) +## See Also + +- [`blop_sim` package](https://github.com/bluesky/blop/tree/main/sim/blop_sim) for XRT simulated beamline control diff --git a/docs/wip/qserver-experiment.md b/docs/wip/qserver-experiment.md index 80f0a230..45a21edf 100644 --- a/docs/wip/qserver-experiment.md +++ b/docs/wip/qserver-experiment.md @@ -25,9 +25,9 @@ The Queueserver should have a RE which sends documents to a ZMQ buffer and then We will: - Use the ZMQ buffer to find a stop document that will tell us a trial point has been measured -- Use the Tiled client to access the data +- Use the Tiled client to access the data -You will need: +You will need: - The Queueserver IP and control port e.g. `tcp://localhost:60615` - The Queueserver IP and info port e.g. 
`tcp://localhost:60625` @@ -36,21 +36,19 @@ You will need: +++ - ### The Ophyd devices in the Queueserver Environment The following devices should be made available in the Queueserver Environment. ```python - from ophyd import Device, Component as Cpt, Signal from blop.utils.functions import himmelblau from ophyd.sim import motor1, motor2 from ophyd.sim import SynGauss, SynSignal, EnumSignal import numpy as np + class SynHimmelblauDetector(Device): - """ Evaluate a point on a Gaussian based on the value of a motor. @@ -93,16 +91,16 @@ class SynHimmelblauDetector(Device): noise_multiplier = Cpt(Signal, value=1, kind="config") def _compute(self): - + # Get the current values of the motors x = self._motor0.read()[self._motor_field0]["value"] y = self._motor1.read()[self._motor_field1]["value"] m = np.array([x, y]) - + noise = self.noise.get() noise_multiplier = self.noise_multiplier.get() - - v = himmelblau(x,y) + + v = himmelblau(x, y) if noise == "poisson": v = int(self.random_state.poisson(np.round(v), 1)) elif noise == "uniform": @@ -139,22 +137,15 @@ class SynHimmelblauDetector(Device): def trigger(self, *args, **kwargs): return self.val.trigger(*args, **kwargs) - -himmel_det = SynHimmelblauDetector( "himmel_det", - motor1, - "motor1", - motor2, - "motor2", - labels={"detectors"}, - noise='uniform', - noise_multiplier=0.01 - ) + +himmel_det = SynHimmelblauDetector( + "himmel_det", motor1, "motor1", motor2, "motor2", labels={"detectors"}, noise="uniform", noise_multiplier=0.01 +) ``` +++ - ### Plans in the Queueserver Environment The Queueserver environment has the plan `acquire` which wraps the `list_scan` plan like this: @@ -162,9 +153,8 @@ The Queueserver environment has the plan `acquire` which wraps the `list_scan` p ```python from blop.plans import TParameterization, Movable, TParameterValue, defaultdict -def _unpack_parameters(dofs: list[Movable], parameterizations: list[TParameterization]) -> list[Movable | TParameterValue]: - +def _unpack_parameters(dofs: 
list[Movable], parameterizations: list[TParameterization]) -> list[Movable | TParameterValue]: """Unpack the parameterizations into Bluesky plan arguments.""" unpacked_dict = defaultdict(list) for parameterization in parameterizations: @@ -175,11 +165,11 @@ def _unpack_parameters(dofs: list[Movable], parameterizations: list[TParameteriz raise ValueError(f"Parameter {dof.name} not found in parameterization. Parameterization: {parameterization}") """ create a dict of dofs""" - + dofs_dict = {} for dof in dofs: dofs_dict[dof.name] = dof - + """ Finally, create a list of dofs and setpoints """ unpacked_list = [] for dof_name, values in unpacked_dict.items(): @@ -188,15 +178,14 @@ def _unpack_parameters(dofs: list[Movable], parameterizations: list[TParameteriz return unpacked_list -def acquire(readables, dofs, trials:dict, md=None): - + +def acquire(readables, dofs, trials: dict, md=None): + plan_args = _unpack_parameters(dofs, trials.values()) - - yield from list_scan(readables, *plan_args,per_step=None, md=md) + yield from list_scan(readables, *plan_args, per_step=None, md=md) ``` - +++ ## Remote Blop Agent @@ -218,7 +207,7 @@ tiled_client = from_uri("http://localhost:8000", api_key='secret') ### Configuration of the Blop Agent -Just as in the other tutorials, we have to configure the DOFS, Objectives and Sensors. +Just as in the other tutorials, we have to configure the DOFS, Objectives and Sensors. Unlike the other tutorials, all of these are now just strings because the objects to the real devices exist only in the Queueserver environment. @@ -243,7 +232,7 @@ sensors = ['himmel_det'] ### Making an Evaluation Function -After the agent has suggested points and run them on the Queueserver, we want to update our model with the results. The EvaluationFunction defines how data is read from a bluesky run and the objective values are created. 
Unlike the blop agent which produces messages to be consumed by a run engine, the blop Queueserver agent is operating with a process which it doesn't, or at least might not, solely control. It's possible that the agent will submit plans to a queue that is not empty. We therefore have to search for a specific key value pair in a stop document. +After the agent has suggested points and run them on the Queueserver, we want to update our model with the results. The EvaluationFunction defines how data is read from a bluesky run and the objective values are created. Unlike the blop agent which produces messages to be consumed by a run engine, the blop Queueserver agent is operating with a process which it doesn't, or at least might not, solely control. It's possible that the agent will submit plans to a queue that is not empty. We therefore have to search for a specific key value pair in a stop document. The EvaluationFunction is called every time a stop document is recieved. It must include some check to see if this particular stop document is from the plan that was submitted previously by the agent. The `BlopQserverAgent` adds a key `agent_suggestion_uid` to the start document `md` dict. This `agent_suggestion_uid` is passed to the `EvaluationFunction` as the argument `uid`. @@ -295,7 +284,7 @@ class DetectorEvaluation(EvaluationFunction): ### Create the agent -Finally we put everything together, instantiate the agent and start an optimization. +Finally we put everything together, instantiate the agent and start an optimization. ```{code-cell} ipython3 diff --git a/launch-devcontainer.sh b/launch-devcontainer.sh new file mode 100755 index 00000000..8983aaee --- /dev/null +++ b/launch-devcontainer.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Simple script to launch the devcontainer with OpenCode + +echo "Starting devcontainer..." +docker compose up -d + +echo "" +echo "Entering container..." 
+docker compose exec devcontainer bash -c "source /root/.bashrc && exec bash" diff --git a/pixi.toml b/pixi.toml index b19598b0..a7c5e328 100644 --- a/pixi.toml +++ b/pixi.toml @@ -1,8 +1,11 @@ [workspace] -authors = ["jessica-moylan ", "Thomas Hopkins "] +authors = [ + "jessica-moylan ", + "Thomas Hopkins ", +] channels = ["conda-forge"] name = "blop" -platforms = ["linux-64"] +platforms = ["linux-64", "osx-arm64"] version = "0.9.0" [dependencies] @@ -12,22 +15,34 @@ python = ">=3.10.0,<3.14" [feature.dev.pypi-dependencies] blop = { path = ".", editable = true, extras = ["dev"] } +blop-sim = { path = "sim", editable = true } # CPU-only variant of dev feature [feature.dev-cpu.dependencies] [feature.dev-cpu.pypi-dependencies] blop = { path = ".", editable = true, extras = ["dev", "cpu"] } +blop-sim = { path = "sim", editable = true } [feature.dev-cpu.tasks] check = "pre-commit run --all-files" -integration-tests = "pytest src/blop/tests/integration" -unit-tests = "pytest src/blop/tests/unit" +tests = "pytest src/blop/tests" [feature.docs.dependencies] [feature.docs.pypi-dependencies] -blop = { path = ".", editable = true, extras = ["docs", "cpu"] } +blop = { path = ".", editable = true, extras = ["cpu"] } +blop-sim = { path = "sim", editable = true } +numpydoc = "*" +sphinx-copybutton = "*" +myst-nb = "*" +furo = "*" +jupytext = "*" +pydata-sphinx-theme = "*" +tiled = { version = ">=0.2.0", extras = ["all"] } +bluesky-tiled-plugins = "*" +ophyd-async = "*" +opencv-python = "*" [feature.py310.dependencies] python = "3.10.*" @@ -43,8 +58,7 @@ python = "3.13.*" [feature.dev.tasks] check = "pre-commit run --all-files" -integration-tests = "pytest src/blop/tests/integration" -unit-tests = "pytest src/blop/tests/unit" +tests = "pytest src/blop/tests" [feature.docs.tasks] build-docs = "make -C docs/ html" diff --git a/pyproject.toml b/pyproject.toml index fae747b5..dc28c323 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,40 +46,23 @@ dynamic = ["version"] 
[project.optional-dependencies] dev = [ "pytest", + "pytest-cov", "ipykernel", "ruff", "nbstripout", "pre-commit", "pandas-stubs", "coverage", - "tiled[all]>=0.2.0", - "matplotlib", - "pyqt5", "pyright", - "ophyd", - "ophyd-async", - "h5py", - "area-detector-handlers", - "bluesky-tiled-plugins", - "xrt>=1.6.1,<2", -] -docs = [ - "numpydoc", - "sphinx-copybutton", - "myst-nb", - "furo", - "jupytext", - "pydata-sphinx-theme", - "blop[dev,cpu]" ] cpu = [ # Empty extra - the source configuration below routes to CPU-only index ] [project.urls] -Homepage = "https://github.com/NSLS-II/blop" +Homepage = "https://github.com/bluesky/blop" Documentation = "https://nsls-ii.github.io/blop" -"Bug Reports" = "https://github.com/NSLS-II/blop/issues" +"Bug Reports" = "https://github.com/bluesky/blop/issues" [tool.hatch] version.source = "vcs" @@ -131,7 +114,7 @@ convention = "numpy" [tool.pyright] ignore = [ - "src/blop/sim/", + "sim/", "src/blop/tests/", "src/blop/bayesian/", # TODO: Remove this and fix type errors "src/blop/ax/qserver_agent.py", # TODO: Remove this and fix type errors diff --git a/sim/README.md b/sim/README.md new file mode 100644 index 00000000..84bffa45 --- /dev/null +++ b/sim/README.md @@ -0,0 +1,29 @@ +# Blop Simulations + +This package provides ophyd-async simulation devices for BLOP documentation and tutorials. It is **not published to PyPI** and is only meant for local development, testing, and running tutorials. 
+ +## Installation + +To use the examples and tutorials, install this package in editable mode from the repository root: + +```bash +pip install -e sim/ +``` + +## Architecture + +The package uses a component-based architecture with individual devices: + +- **Backends**: Global singletons that handle beam physics + + - `SimpleBackend`: Mathematical Gaussian beam simulation + - `XRTBackend`: Full ray-tracing simulation using XRT + +- **Devices**: Individual ophyd-async devices + + - Backend-agnostic: + - `DetectorDevice`: Generates beam images (from the backend API) + - `SlitDevice`: Four-blade aperture slit + - Backend-specific (available in submodules): + - `blop_sim.devices.simple.KBMirror`: KB mirror with jack positions (for SimpleBackend) + - `blop_sim.devices.xrt.KBMirror`: KB mirror with curvature radius (for XRTBackend) diff --git a/sim/blop_sim/__init__.py b/sim/blop_sim/__init__.py new file mode 100644 index 00000000..6d66a330 --- /dev/null +++ b/sim/blop_sim/__init__.py @@ -0,0 +1,10 @@ +"""blop_sim: Simulation devices for BLOP documentation and tutorials.""" + +# Backend exports +from .backends.simple import SimpleBackend +from .backends.xrt import XRTBackend + +__all__ = [ + "SimpleBackend", + "XRTBackend", +] diff --git a/sim/blop_sim/backends/__init__.py b/sim/blop_sim/backends/__init__.py new file mode 100644 index 00000000..78f79df8 --- /dev/null +++ b/sim/blop_sim/backends/__init__.py @@ -0,0 +1,7 @@ +"""Backend simulation infrastructure for blop_sim.""" + +from .core import SimBackend +from .simple import SimpleBackend +from .xrt import XRTBackend + +__all__ = ["SimBackend", "SimpleBackend", "XRTBackend"] diff --git a/sim/blop_sim/backends/core.py b/sim/blop_sim/backends/core.py new file mode 100644 index 00000000..adb6c08e --- /dev/null +++ b/sim/blop_sim/backends/core.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from collections.abc import Awaitable, Callable +from typing import Any + +import numpy as np + + +class SimBackend(ABC): 
+ """Base class for simulation backends. + + Uses singleton pattern - only one instance per backend type exists. + All device callbacks are expected to be async. + """ + + _instances: dict[type, "SimBackend"] = {} + + def __new__(cls): + """Singleton pattern: return existing instance or create new.""" + if cls not in cls._instances: + instance = super().__new__(cls) + cls._instances[cls] = instance + instance._initialized = False + return cls._instances[cls] + + def __init__(self): + """Initialize backend state (only runs once due to singleton).""" + if self._initialized: + return + + self._device_states: dict[str, dict[str, Any]] = {} + self._image_shape = (300, 400) + self._initialized = True + + def register_device(self, device_name: str, device_type: str, get_state_callback: Callable[[], Awaitable[dict]]): + """Register a device with the backend. + + Args: + device_name: Unique name for the device + device_type: Type of device ("kb_mirror_simple", "kb_mirror_xrt", "slit", "detector") + get_state_callback: Async callable that returns current device state as dict + + Example:: + + async def _get_state(self) -> dict: + return { + "radius": await self.radius.get_value(), + "position": await self.position.get_value(), + } + """ + self._device_states[device_name] = { + "type": device_type, + "get_state": get_state_callback, + } + + async def _get_device_state(self, device_name: str) -> dict: + """Get device state asynchronously. + + Args: + device_name: Name of the device + + Returns: + Device state dictionary + """ + device = self._device_states[device_name] + callback = device["get_state"] + return await callback() + + @abstractmethod + async def generate_beam(self) -> np.ndarray: + """Generate beam image based on current device states. 
+ + Returns: + 2D numpy array with shape self._image_shape + """ + pass + + def get_image_shape(self) -> tuple[int, int]: + """Return the image shape.""" + return self._image_shape diff --git a/src/blop/tests/unit/__init__.py b/sim/blop_sim/backends/models/__init__.py similarity index 100% rename from src/blop/tests/unit/__init__.py rename to sim/blop_sim/backends/models/__init__.py diff --git a/src/blop/sim/xrt_kb_model.py b/sim/blop_sim/backends/models/xrt_kb_model.py similarity index 100% rename from src/blop/sim/xrt_kb_model.py rename to sim/blop_sim/backends/models/xrt_kb_model.py diff --git a/sim/blop_sim/backends/simple.py b/sim/blop_sim/backends/simple.py new file mode 100644 index 00000000..05bf615c --- /dev/null +++ b/sim/blop_sim/backends/simple.py @@ -0,0 +1,104 @@ +"""Simple mathematical beam simulation backend.""" + +import numpy as np +import scipy as sp # type: ignore[import-untyped] + +from .core import SimBackend + + +class SimpleBackend(SimBackend): + """Mathematical Gaussian beam simulation with 4th power falloff.""" + + def __init__(self, noise: bool = False) -> None: + super().__init__() + self._noise = noise + + async def generate_beam(self) -> np.ndarray: + """Generate beam using mathematical Gaussian model. 
+ + The beam is affected by: + - KB mirror jack positions (controls focus position and width) + - Slit aperture (clips the beam) + - Optional noise (white + pink noise) + + Returns: + 2D numpy array with shape (nx, ny) + """ + nx, ny = self._image_shape + + # Get device states + kb_states = await self._get_kb_states() + slit_state = await self._get_slit_state() + + # Create meshgrid + x = np.linspace(-10, 10, ny) + y = np.linspace(-10, 10, nx) + X, Y = np.meshgrid(x, y) + + # Calculate beam center from KB mirror positions + x0 = kb_states["kbh"]["ush"] - kb_states["kbh"]["dsh"] + y0 = kb_states["kbv"]["usv"] - kb_states["kbv"]["dsv"] + + # Calculate beam widths from KB mirror positions + x_width = np.sqrt(0.2 + 5e-1 * (kb_states["kbh"]["ush"] + kb_states["kbh"]["dsh"] - 1) ** 2) + y_width = np.sqrt(0.1 + 5e-1 * (kb_states["kbv"]["usv"] + kb_states["kbv"]["dsv"] - 2) ** 2) + + # Generate Gaussian beam with 4th power falloff + beam = np.exp(-0.5 * (((X - x0) / x_width) ** 4 + ((Y - y0) / y_width) ** 4)) / ( + np.sqrt(2 * np.pi) * x_width * y_width + ) + + # Apply slit mask + mask = X > slit_state["inboard"] + mask &= X < slit_state["outboard"] + mask &= Y > slit_state["lower"] + mask &= Y < slit_state["upper"] + mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) + + image = beam * mask + + # Add noise if requested + if self._noise: + kx = np.fft.fftfreq(n=len(x), d=0.1) + ky = np.fft.fftfreq(n=len(y), d=0.1) + KX, KY = np.meshgrid(kx, ky) + + power_spectrum = 1 / (1e-2 + KX**2 + KY**2) + + white_noise = 1e-3 * np.random.standard_normal(size=X.shape) + pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) + + image += white_noise + pink_noise + + return image + + async def _get_kb_states(self) -> dict: + """Get KB mirror states from registered devices.""" + kbh_state = {"ush": 0.0, "dsh": 0.0} + kbv_state = {"usv": 0.0, "dsv": 0.0} + + for name, device in self._device_states.items(): + if 
device["type"] == "kb_mirror_simple": + state = await self._get_device_state(name) + if state["orientation"] == "horizontal": + kbh_state["ush"] = state["upstream"] + kbh_state["dsh"] = state["downstream"] + elif state["orientation"] == "vertical": + kbv_state["usv"] = state["upstream"] + kbv_state["dsv"] = state["downstream"] + + return {"kbh": kbh_state, "kbv": kbv_state} + + async def _get_slit_state(self) -> dict: + """Get slit state from registered devices.""" + slit_state = {"inboard": -5.0, "outboard": 5.0, "lower": -5.0, "upper": 5.0} + + for name, device in self._device_states.items(): + if device["type"] == "slit": + slit_state = await self._get_device_state(name) + break + + return slit_state + + +__all__ = ["SimpleBackend"] diff --git a/sim/blop_sim/backends/xrt.py b/sim/blop_sim/backends/xrt.py new file mode 100644 index 00000000..0815d033 --- /dev/null +++ b/sim/blop_sim/backends/xrt.py @@ -0,0 +1,77 @@ +"""XRT ray-tracing beam simulation backend.""" + +import numpy as np + +from . import SimBackend +from .models.xrt_kb_model import build_beamline, build_histRGB, run_process + + +class XRTBackend(SimBackend): + """XRT ray-tracing simulation backend. + + Uses the XRT package to perform realistic ray-tracing through a KB mirror pair. + Much slower than SimpleBackend but more physically accurate. + """ + + def __init__(self, noise: bool = False): + """Initialize XRT backend.""" + super().__init__() + self._beamline = None + self._limits = [[-0.6, 0.6], [-0.45, 0.45]] + self._noise = noise + + def _ensure_beamline(self): + """Build XRT beamline if not already built.""" + if self._beamline is None: + self._beamline = build_beamline() + + async def generate_beam(self) -> np.ndarray: + """Generate beam using XRT ray-tracing. 
+ + Returns: + 2D numpy array with shape (300, 400) + """ + self._ensure_beamline() + + # Get KB mirror radii from devices + mirror_radii = await self._get_mirror_radii() + + # Update XRT beamline mirror parameters + self._beamline.toroidMirror01.R = mirror_radii[0] # Vertical mirror + self._beamline.toroidMirror02.R = mirror_radii[1] # Horizontal mirror + + # Run ray tracing + outDict = run_process(self._beamline) + lb = outDict["screen01beamLocal01"] + + # Build histogram from ray data + hist2d, _, _ = build_histRGB(lb, lb, limits=self._limits, isScreen=True, shape=[400, 300]) + image = hist2d + + # Add noise if requested + if self._noise: + image += 1e-3 * np.abs(np.random.standard_normal(size=image.shape)) + + return image + + async def _get_mirror_radii(self) -> list[float]: + """Get KB mirror radii from registered devices. + + Returns: + [R1, R2] where R1 is first mirror (vertical), R2 is second mirror (horizontal) + """ + # Default radii from xrt_kb_model.py + radii = [38245.0, 21035.0] + + for name, device in self._device_states.items(): + if device["type"] == "kb_mirror_xrt": + state = await self._get_device_state(name) + mirror_index = state["mirror_index"] + radius = state["radius"] + if mirror_index < len(radii): + radii[mirror_index] = radius + + return radii + + +__all__ = ["XRTBackend"] diff --git a/sim/blop_sim/devices/__init__.py b/sim/blop_sim/devices/__init__.py new file mode 100644 index 00000000..bbfb6f8e --- /dev/null +++ b/sim/blop_sim/devices/__init__.py @@ -0,0 +1,9 @@ +"""Ophyd-async device exports for blop_sim.""" + +from .detector import DetectorDevice +from .slit import SlitDevice + +__all__ = [ + "DetectorDevice", + "SlitDevice", +] diff --git a/sim/blop_sim/devices/detector.py b/sim/blop_sim/devices/detector.py new file mode 100644 index 00000000..169a01a4 --- /dev/null +++ b/sim/blop_sim/devices/detector.py @@ -0,0 +1,228 @@ +"""Detector device for beam simulation - images only, NO statistics.""" + +import itertools +from collections 
import deque +from collections.abc import AsyncIterator +from pathlib import Path +from typing import Any + +import h5py # type: ignore[import-untyped] +import numpy as np +from event_model import ( # type: ignore[import-untyped] + DataKey, + StreamDatum, + StreamRange, + StreamResource, + compose_stream_resource, +) +from ophyd_async.core import ( + DetectorController, + DetectorWriter, + PathProvider, + StandardDetector, +) + +from ..backends import SimBackend + + +class SimDetectorController(DetectorController): + """Controller for simulated detector - generates images only.""" + + def __init__(self, backend: SimBackend): + self._backend = backend + + def get_deadtime(self, exposure: float | None) -> float: + """Detector has no deadtime (instant acquisition).""" + return 0.0 + + async def prepare(self, trigger_info: Any) -> None: + """Prepare for acquisition with trigger info.""" + # Software triggered detector, no preparation needed + pass + + async def arm(self) -> None: + """Prepare for acquisition.""" + # Software triggered, no arming needed + pass + + async def wait_for_idle(self): + """Wait for acquisition to complete.""" + # Software triggered, always idle + pass + + async def disarm(self): + """Clean up after acquisition.""" + pass + + +class SimDetectorWriter(DetectorWriter): + """Writer for detector with Tiled streaming.""" + + def __init__( + self, + backend: SimBackend, + path_provider: PathProvider, + ): + self._backend = backend + self.path_provider = path_provider + self._asset_docs_cache: deque[tuple[str, StreamResource | StreamDatum]] = deque() + self._h5file: h5py.File | None = None + self._dataset: h5py.Dataset | None = None + self._counter: itertools.count[int] | None = None + self._stream_datum_factory: Any | None = None + self._last_index = 0 + + async def open(self, name: str | None = None, exposures_per_event: int = 1) -> dict[str, DataKey]: + """Open HDF5 file and setup stream resources. 
+ + Args: + name: Name of detector (optional, uses name_provider if not given) + exposures_per_event: Number of exposures per event + """ + # Create directory structure + path_info = self.path_provider() + full_path = path_info.directory_path / path_info.filename + Path(path_info.directory_path).mkdir(parents=True, exist_ok=True) + + # Get image shape from backend + image_shape = self._backend.get_image_shape() + + # Create HDF5 file + self._h5file = h5py.File(full_path, "x") + group = self._h5file.create_group("/entry") + self._dataset = group.create_dataset( + "image", + data=np.full(fill_value=np.nan, shape=(1, *image_shape)), + maxshape=(None, *image_shape), + chunks=(1, *image_shape), + dtype="float64", + compression="lzf", + ) + + self._counter = itertools.count() + data_key = f"{name}_image" + + # Create stream resource + uri = f"file://localhost/{str(full_path).strip('/')}" + ( + stream_resource_doc, + self._stream_datum_factory, + ) = compose_stream_resource( + mimetype="application/x-hdf5", + uri=uri, + data_key=data_key, + parameters={ + "chunk_shape": (1, *image_shape), + "dataset": "/entry/image", + }, + ) + + self._asset_docs_cache.append(("stream_resource", stream_resource_doc)) + + # Return describe dictionary + return { + data_key: { + "source": "sim", + "shape": [1, *image_shape], + "dtype": "array", + "dtype_numpy": np.dtype(np.float64).str, + "external": "STREAM:", + } + } + + async def observe_indices_written(self, timeout: float = float("inf")) -> AsyncIterator[int]: + """Observe indices as they're written - yield after each frame is generated.""" + # Generate one image immediately (software-triggered, instant acquisition) + await self._write_single_frame() + + # Yield the index to signal completion + yield self._last_index + + async def get_indices_written(self) -> int: + """Get number of indices written so far.""" + return self._last_index + + async def collect_stream_docs( + self, name: str, indices_written: int + ) -> 
AsyncIterator[tuple[str, StreamResource | StreamDatum]]: + """Collect stream datum documents from the cache. + + Args: + name: Name of the detector device + indices_written: Number of indices written + """ + # Pop all documents from the cache and yield them + while self._asset_docs_cache: + yield self._asset_docs_cache.popleft() + + async def close(self) -> None: + """Close HDF5 file.""" + if self._h5file: + self._h5file.close() + self._h5file = None + + async def _write_single_frame(self) -> None: + """Generate and write a single beam image (internal method).""" + if self._counter is None or self._dataset is None or self._stream_datum_factory is None: + raise RuntimeError("Writer not open, call open() first") + + # Generate beam image from backend (async) + image = await self._backend.generate_beam() + + # Store image + current_frame = next(self._counter) + self._dataset.resize((current_frame + 1, *image.shape)) + self._dataset[current_frame, :, :] = image + + # Create stream datum + stream_datum_doc = self._stream_datum_factory( + StreamRange(start=current_frame, stop=current_frame + 1), + ) + self._asset_docs_cache.append(("stream_datum", stream_datum_doc)) + + self._last_index = current_frame + 1 + + +class DetectorDevice(StandardDetector): + """Detector device that generates beam images. 
+ + Args: + backend: Simulation backend + path_provider: Provides directory path for HDF5 files + name: Device name + """ + + def __init__( + self, + backend: SimBackend, + path_provider: PathProvider, + name: str = "", + ): + self._backend = backend + + # Create controller + controller = SimDetectorController(backend) + + # Create writer + writer = SimDetectorWriter(backend, path_provider) + + super().__init__( + controller=controller, + writer=writer, + config_sigs=[], + name=name, + ) + + # Register with backend + backend.register_device( + device_name=name, + device_type="detector", + get_state_callback=self._get_state, + ) + + async def _get_state(self) -> dict: + """Get current detector state for backend.""" + return {} + + +__all__ = ["DetectorDevice"] diff --git a/sim/blop_sim/devices/simple/__init__.py b/sim/blop_sim/devices/simple/__init__.py new file mode 100644 index 00000000..747c2df4 --- /dev/null +++ b/sim/blop_sim/devices/simple/__init__.py @@ -0,0 +1,5 @@ +"""SimpleBackend-specific devices.""" + +from .kb_mirror import KBMirror + +__all__ = ["KBMirror"] diff --git a/sim/blop_sim/devices/simple/kb_mirror.py b/sim/blop_sim/devices/simple/kb_mirror.py new file mode 100644 index 00000000..99ee0c1b --- /dev/null +++ b/sim/blop_sim/devices/simple/kb_mirror.py @@ -0,0 +1,48 @@ +"""KB mirror devices for SimpleBackend.""" + +from ophyd_async.core import StandardReadable, soft_signal_rw +from ophyd_async.core import StandardReadableFormat as Format + +from ...backends import SimBackend + + +class KBMirror(StandardReadable): + """KB mirror with jack position control (for SimpleBackend). + + Exposes two jack positions (upstream/downstream) that control the mirror curvature. + Used with SimpleBackend for mathematical beam simulation. 
+ + Args: + backend: Simulation backend (should be SimpleBackend) + orientation: "horizontal" or "vertical" + name: Device name + """ + + def __init__(self, backend: SimBackend, orientation: str = "horizontal", name: str = ""): + self._backend = backend + self._orientation = orientation + + # Jack position signals (CONFIG since they're not measurement outputs) + with self.add_children_as_readables(Format.HINTED_SIGNAL): + self.upstream = soft_signal_rw(float, 0.0) + self.downstream = soft_signal_rw(float, 0.0) + + super().__init__(name=name) + + # Register with backend + backend.register_device( + device_name=name, + device_type="kb_mirror_simple", + get_state_callback=self._get_state, + ) + + async def _get_state(self) -> dict: + """Get current mirror state for backend (async).""" + return { + "orientation": self._orientation, + "upstream": await self.upstream.get_value(), + "downstream": await self.downstream.get_value(), + } + + +__all__ = ["KBMirror"] diff --git a/sim/blop_sim/devices/slit.py b/sim/blop_sim/devices/slit.py new file mode 100644 index 00000000..6852a0bc --- /dev/null +++ b/sim/blop_sim/devices/slit.py @@ -0,0 +1,49 @@ +"""Slit device for beam simulation.""" + +from ophyd_async.core import StandardReadable, soft_signal_rw +from ophyd_async.core import StandardReadableFormat as Format + +from ..backends import SimBackend + + +class SlitDevice(StandardReadable): + """Four-blade slit device for aperture control. + + Controls a rectangular aperture that clips the beam. The slit is defined by + four blade positions that create a window in the beam path. 
+ + Args: + backend: Simulation backend + name: Device name + """ + + def __init__(self, backend: SimBackend, name: str = ""): + self._backend = backend + + # Four blade positions (CONFIG since they're not measurement outputs) + with self.add_children_as_readables(Format.CONFIG_SIGNAL): + self.inboard = soft_signal_rw(float, -5.0) + self.outboard = soft_signal_rw(float, 5.0) + self.lower = soft_signal_rw(float, -5.0) + self.upper = soft_signal_rw(float, 5.0) + + super().__init__(name=name) + + # Register with backend + backend.register_device( + device_name=name, + device_type="slit", + get_state_callback=self._get_state, + ) + + async def _get_state(self) -> dict: + """Get current slit state for backend (async).""" + return { + "inboard": await self.inboard.get_value(), + "outboard": await self.outboard.get_value(), + "lower": await self.lower.get_value(), + "upper": await self.upper.get_value(), + } + + +__all__ = ["SlitDevice"] diff --git a/sim/blop_sim/devices/xrt/__init__.py b/sim/blop_sim/devices/xrt/__init__.py new file mode 100644 index 00000000..960eb516 --- /dev/null +++ b/sim/blop_sim/devices/xrt/__init__.py @@ -0,0 +1,5 @@ +"""XRTBackend-specific devices.""" + +from .kb_mirror import KBMirror + +__all__ = ["KBMirror"] diff --git a/sim/blop_sim/devices/xrt/kb_mirror.py b/sim/blop_sim/devices/xrt/kb_mirror.py new file mode 100644 index 00000000..873131a2 --- /dev/null +++ b/sim/blop_sim/devices/xrt/kb_mirror.py @@ -0,0 +1,53 @@ +"""KB mirror devices for XRTBackend.""" + +from ophyd_async.core import StandardReadable, soft_signal_rw +from ophyd_async.core import StandardReadableFormat as Format + +from ...backends import SimBackend + + +class KBMirror(StandardReadable): + """KB mirror with curvature radius control (for XRTBackend). + + Exposes a single radius parameter that directly controls the XRT mirror R value. + Used with XRTBackend for ray-tracing simulation. 
+ + Args: + backend: Simulation backend (should be XRTBackend) + mirror_index: 0 for first mirror (vertical), 1 for second mirror (horizontal) + initial_radius: Initial curvature radius in mm + name: Device name + """ + + def __init__( + self, + backend: SimBackend, + mirror_index: int, + initial_radius: float = 30000.0, + name: str = "", + ): + self._backend = backend + self._mirror_index = mirror_index + + # Curvature radius signal + with self.add_children_as_readables(Format.HINTED_SIGNAL): + self.radius = soft_signal_rw(float, initial_radius) + + super().__init__(name=name) + + # Register with backend + backend.register_device( + device_name=name, + device_type="kb_mirror_xrt", + get_state_callback=self._get_state, + ) + + async def _get_state(self) -> dict: + """Get current mirror state for backend (async).""" + return { + "mirror_index": self._mirror_index, + "radius": await self.radius.get_value(), + } + + +__all__ = ["KBMirror"] diff --git a/sim/pyproject.toml b/sim/pyproject.toml new file mode 100644 index 00000000..db104e23 --- /dev/null +++ b/sim/pyproject.toml @@ -0,0 +1,25 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "blop-sim" +version = "0.0.0" +description = "Example simulations and benchmarks for blop (not published to PyPI)" +readme = "README.md" +authors = [{ name = "Thomas Hopkins", email = "thopkins1@bnl.gov" }] +requires-python = ">=3.10" +dependencies = [ + "blop", + "ophyd-async", + "h5py", + "area-detector-handlers", + "bluesky-tiled-plugins", + "numpy", + "scipy", + "matplotlib", + "xrt>=1.6.1,<2", +] + +[tool.hatch.build.targets.wheel] +packages = ["blop_sim"] diff --git a/src/blop/__init__.py b/src/blop/__init__.py index d1e44fa8..dcb51ec4 100644 --- a/src/blop/__init__.py +++ b/src/blop/__init__.py @@ -1,4 +1,5 @@ from .ax import Agent, ChoiceDOF, DOFConstraint, Objective, OutcomeConstraint, RangeDOF, ScalarizedObjective +from .plans import acquire_baseline, default_acquire, 
optimize, optimize_step, sample_suggestions try: from ._version import __version__ @@ -8,10 +9,15 @@ __all__ = [ "__version__", "Agent", - "RangeDOF", "ChoiceDOF", "DOFConstraint", "Objective", "OutcomeConstraint", + "RangeDOF", "ScalarizedObjective", + "acquire_baseline", + "default_acquire", + "optimize", + "optimize_step", + "sample_suggestions", ] diff --git a/src/blop/ax/agent.py b/src/blop/ax/agent.py index 8e99e2b9..76a7057d 100644 --- a/src/blop/ax/agent.py +++ b/src/blop/ax/agent.py @@ -1,7 +1,7 @@ import importlib.util import logging from collections.abc import Sequence -from typing import Any +from typing import Any, TypeGuard from ax import Client from ax.analysis import ContourPlot @@ -15,8 +15,9 @@ # =============================== from bluesky.utils import MsgGenerator -from ..plans import acquire_baseline, optimize +from ..plans import acquire_baseline, optimize, sample_suggestions from ..protocols import AcquisitionPlan, Actuator, EvaluationFunction, OptimizationProblem, Sensor +from ..utils import InferredReadable from .dof import DOF, DOFConstraint from .objective import Objective, OutcomeConstraint, to_ax_objective_str from .optimizer import AxOptimizer @@ -24,6 +25,14 @@ logger = logging.getLogger(__name__) +def _has_str_keys(d: dict[DOF, Any] | dict[str, Any]) -> TypeGuard[dict[str, Any]]: + return all(isinstance(key, str) for key in d.keys()) + + +def _has_dof_keys(d: dict[DOF, Any] | dict[str, Any]) -> TypeGuard[dict[DOF, Any]]: + return all(isinstance(key, DOF) for key in d.keys()) + + class Agent: """ An interface that uses Ax as the backend for optimization and experiment tracking. 
@@ -98,6 +107,7 @@ def __init__( checkpoint_path=checkpoint_path, **kwargs, ) + self._readable_cache: dict[str, InferredReadable] = {} @classmethod def from_checkpoint( @@ -163,6 +173,34 @@ def ax_client(self) -> Client: def checkpoint_path(self) -> str | None: return self._optimizer.checkpoint_path + @property + def fixed_dofs(self) -> dict[str, Any] | None: + return self._optimizer.fixed_parameters + + @fixed_dofs.setter + def fixed_dofs(self, fixed_dofs: dict[DOF, Any] | dict[str, Any] | None) -> None: + """ + Fix degrees of freedom to a certain value for future optimizations. + + Parameters + ---------- + fixed_dofs : dict[DOF, Any] | dict[str, Any] | None + A mapping of DOFs or DOF names to the values they should be fixed to. + + """ + if not fixed_dofs: + self._optimizer.fixed_parameters = None + return + + if _has_str_keys(fixed_dofs): + self._optimizer.fixed_parameters = fixed_dofs + elif _has_dof_keys(fixed_dofs): + self._optimizer.fixed_parameters = {dof.parameter_name: value for dof, value in fixed_dofs.items()} + else: + raise ValueError( + f"Keys must all be either {type(DOF)} or {type(str)}, but got {type(list(fixed_dofs.keys())[0])}" + ) + def to_optimization_problem(self) -> OptimizationProblem: """ Construct an optimization problem from the agent. @@ -293,7 +331,37 @@ def optimize(self, iterations: int = 1, n_points: int = 1) -> MsgGenerator[None] suggest : Get point suggestions without running acquisition. ingest : Manually ingest evaluation results. """ - yield from optimize(self.to_optimization_problem(), iterations=iterations, n_points=n_points) + yield from optimize( + self.to_optimization_problem(), iterations=iterations, n_points=n_points, readable_cache=self._readable_cache + ) + + def sample_suggestions(self, suggestions: list[dict]) -> MsgGenerator[tuple[str, list[dict], list[dict]]]: + """ + Evaluate specific parameter combinations. + + Acquires data for given suggestions and ingests results. 
Supports both + optimizer suggestions and manual points. + + Parameters + ---------- + suggestions : list[dict] + Either optimizer suggestions (with "_id") or manual points (without "_id"). + + Returns + ------- + tuple[str, list[dict], list[dict]] + Bluesky run UID, suggestions with "_id", and outcomes. + + See Also + -------- + suggest : Get optimizer suggestions. + optimize : Run full optimization loop. + """ + return ( + yield from sample_suggestions( + self.to_optimization_problem(), suggestions=suggestions, readable_cache=self._readable_cache + ) + ) def plot_objective( self, x_dof_name: str, y_dof_name: str, objective_name: str, *args: Any, **kwargs: Any diff --git a/src/blop/ax/optimizer.py b/src/blop/ax/optimizer.py index 9b473246..0a6ff448 100644 --- a/src/blop/ax/optimizer.py +++ b/src/blop/ax/optimizer.py @@ -3,10 +3,10 @@ from ax import ChoiceParameterConfig, Client, RangeParameterConfig -from ..protocols import ID_KEY, Checkpointable, Optimizer +from ..protocols import ID_KEY, CanRegisterSuggestions, Checkpointable, Optimizer -class AxOptimizer(Optimizer, Checkpointable): +class AxOptimizer(Optimizer, Checkpointable, CanRegisterSuggestions): """ An optimizer that uses Ax as the backend for optimization and experiment tracking. 
@@ -57,6 +57,7 @@ def __init__( objective=objective, outcome_constraints=outcome_constraints, ) + self._fixed_parameters = None @classmethod def from_checkpoint(cls, checkpoint_path: str) -> "AxOptimizer": @@ -76,6 +77,7 @@ def from_checkpoint(cls, checkpoint_path: str) -> "AxOptimizer": client = Client.load_from_json_file(checkpoint_path) instance = object.__new__(cls) instance._parameter_names = list(client._experiment.parameters.keys()) + instance._fixed_parameters = None instance._checkpoint_path = checkpoint_path instance._client = client @@ -89,6 +91,22 @@ def checkpoint_path(self) -> str | None: def ax_client(self) -> Client: return self._client + @property + def fixed_parameters(self) -> dict[str, Any] | None: + return self._fixed_parameters + + @fixed_parameters.setter + def fixed_parameters(self, fixed_parameters: dict[str, Any] | None) -> None: + if not fixed_parameters: + self._fixed_parameters = None + return + unknown_parameter_names = set(fixed_parameters) - set(self._parameter_names) + if unknown_parameter_names: + raise KeyError( + f"Unknown fixed parameter(s): {sorted(unknown_parameter_names)}, expected: {sorted(self._parameter_names)}" + ) + self._fixed_parameters = dict(fixed_parameters) + def suggest(self, num_points: int | None = None) -> list[dict]: """ Get the next point(s) to evaluate in the search space. 
@@ -109,7 +127,7 @@ def suggest(self, num_points: int | None = None) -> list[dict]: """ if num_points is None: num_points = 1 - next_trials = self._client.get_next_trials(max_trials=num_points) + next_trials = self._client.get_next_trials(max_trials=num_points, fixed_parameters=self._fixed_parameters) return [ { "_id": trial_index, @@ -158,6 +176,37 @@ def ingest(self, points: list[dict]) -> None: trial_idx = self._client.attach_baseline(parameters=parameters) self._client.complete_trial(trial_index=trial_idx, raw_data=outcomes) + def register_suggestions(self, suggestions: list[dict]) -> list[dict]: + """ + Register manual suggestions with the Ax experiment. + + Attaches trials to the experiment and returns the suggestions with "_id" keys + added for tracking. This enables manual point injection alongside optimizer-driven + suggestions. + + Parameters + ---------- + suggestions : list[dict] + Parameter combinations to register. The "_id" key will be overwritten if present. + + Returns + ------- + list[dict] + The same suggestions with "_id" keys added. + """ + registered = [] + for suggestion in suggestions: + # Extract parameters (ignore _id if present) + parameters = {k: v for k, v in suggestion.items() if k != ID_KEY} + + # Attach trial to Ax experiment + trial_idx = self._client.attach_trial(parameters=parameters) + + # Return with trial ID + registered.append({ID_KEY: trial_idx, **parameters}) + + return registered + def checkpoint(self) -> None: """ Save the optimizer's state to JSON file. 
diff --git a/src/blop/plan_stubs.py b/src/blop/plan_stubs.py new file mode 100644 index 00000000..93b019e5 --- /dev/null +++ b/src/blop/plan_stubs.py @@ -0,0 +1,100 @@ +from collections import defaultdict +from typing import Any, Literal + +import bluesky.plan_stubs as bps +import numpy as np +from bluesky.utils import MsgGenerator, plan + +from .protocols import ID_KEY +from .utils import InferredReadable + +_BLUESKY_UID_KEY: Literal["bluesky_uid"] = "bluesky_uid" +_SUGGESTION_IDS_KEY: Literal["suggestion_ids"] = "suggestion_ids" + + +@plan +def read_step( + uid: str, suggestions: list[dict], outcomes: list[dict], n_points: int, readable_cache: dict[str, InferredReadable] +) -> MsgGenerator[None]: + """Plan stub to read the suggestions and outcomes of a single optimization step. + + If fewer suggestions are returned than n_points arrays are padded to n_points length + with np.nan to ensure consistent shapes for event-model specification. + + Parameters + ---------- + uid : str + The Bluesky run UID from the acquisition plan. + suggestions : list[dict] + List of suggestion dictionaries, each containing an ID_KEY. + outcomes : list[dict] + List of outcome dictionaries, each containing an ID_KEY matching suggestions. + n_points : int + Expected number of suggestions. Arrays will be padded to this length if needed. + readable_cache : dict[str, InferredReadable] + Cache of InferredReadable objects to reuse across iterations. 
+ """ + # Group by ID_KEY to get proper suggestion/outcome order + suggestion_by_id = {} + outcome_by_id = {} + for suggestion in suggestions: + suggestion_copy = suggestion.copy() + key = str(suggestion_copy.pop(ID_KEY)) + suggestion_by_id[key] = suggestion_copy + for outcome in outcomes: + outcome_copy = outcome.copy() + key = str(outcome_copy.pop(ID_KEY)) + outcome_by_id[key] = outcome_copy + sids = {str(sid) for sid in suggestion_by_id.keys()} + if sids != set(outcome_by_id.keys()): + raise ValueError( + "The suggestions and outcomes must contain the same IDs. Got suggestions: " + f"{set(suggestion_by_id.keys())} and outcomes: {set(outcome_by_id.keys())}" + ) + + # Flatten the suggestions and outcomes into a single dictionary of lists + suggestions_flat: dict[str, list[Any]] = defaultdict(list) + outcomes_flat: dict[str, list[Any]] = defaultdict(list) + # Sort for deterministic ordering, not strictly necessary + sorted_sids = sorted(sids) + for key in sorted_sids: + for name, value in suggestion_by_id[key].items(): + suggestions_flat[name].append(value) + for name, value in outcome_by_id[key].items(): + outcomes_flat[name].append(value) + + # Pad arrays to n_points if suggestions had fewer trials than expected + # TODO: Use awkward-array to handle this in the future + actual_n = len(sorted_sids) + if actual_n < n_points: + # Pad suggestion arrays with NaN + for name in suggestions_flat: + suggestions_flat[name].extend([np.nan] * (n_points - actual_n)) + # Pad outcome arrays with NaN + for name in outcomes_flat: + outcomes_flat[name].extend([np.nan] * (n_points - actual_n)) + # Pad suggestion IDs with empty string to maintain string dtype + sorted_sids.extend([""] * (n_points - actual_n)) + + # Create or update the InferredReadables for the suggestion_ids, step uid, suggestions, and outcomes + if _SUGGESTION_IDS_KEY not in readable_cache: + readable_cache[_SUGGESTION_IDS_KEY] = InferredReadable(_SUGGESTION_IDS_KEY, initial_value=sorted_sids) + else: + 
readable_cache[_SUGGESTION_IDS_KEY].update(sorted_sids) + if _BLUESKY_UID_KEY not in readable_cache: + readable_cache[_BLUESKY_UID_KEY] = InferredReadable(_BLUESKY_UID_KEY, initial_value=uid) + else: + readable_cache[_BLUESKY_UID_KEY].update(uid) + for name, value in suggestions_flat.items(): + if name not in readable_cache: + readable_cache[name] = InferredReadable(name, initial_value=value) + else: + readable_cache[name].update(value) + for name, value in outcomes_flat.items(): + if name not in readable_cache: + readable_cache[name] = InferredReadable(name, initial_value=value) + else: + readable_cache[name].update(value) + + # Read and save to produce a single event + yield from bps.trigger_and_read(list(readable_cache.values())) diff --git a/src/blop/plans.py b/src/blop/plans.py new file mode 100644 index 00000000..672a9da7 --- /dev/null +++ b/src/blop/plans.py @@ -0,0 +1,380 @@ +import logging +from collections.abc import Sequence +from typing import Any, Literal, cast + +import bluesky.plan_stubs as bps +import bluesky.plans as bp +import bluesky.preprocessors as bpp +from bluesky.protocols import Readable +from bluesky.utils import MsgGenerator, plan + +from .plan_stubs import read_step +from .protocols import ID_KEY, Actuator, CanRegisterSuggestions, Checkpointable, OptimizationProblem, Optimizer, Sensor +from .utils import InferredReadable, collect_optimization_metadata, route_suggestions + +logger = logging.getLogger(__name__) + +_DEFAULT_ACQUIRE_RUN_KEY: Literal["default_acquire"] = "default_acquire" +_SAMPLE_SUGGESTIONS_RUN_KEY: Literal["sample_suggestions"] = "sample_suggestions" +_OPTIMIZE_RUN_KEY: Literal["optimize"] = "optimize" + + +def _unpack_for_list_scan(suggestions: list[dict], actuators: Sequence[Actuator]) -> list[Any]: + """Unpack the actuators and inputs into Bluesky list_scan plan arguments.""" + actuators_and_inputs = {actuator: [suggestion[actuator.name] for suggestion in suggestions] for actuator in actuators} + unpacked_list = [] + 
for actuator, values in actuators_and_inputs.items():
+        unpacked_list.append(actuator)
+        unpacked_list.append(values)
+
+    return unpacked_list
+
+
+@plan
+def default_acquire(
+    suggestions: list[dict],
+    actuators: Sequence[Actuator],
+    sensors: Sequence[Sensor] | None = None,
+    *,
+    per_step: bp.PerStep | None = None,
+    **kwargs: Any,
+) -> MsgGenerator[str]:
+    """
+    A default plan to acquire data for optimization. Simply a list scan.
+
+    Includes a default metadata key "blop_suggestions" which can be used to identify
+    the suggestions that were acquired for each step of the scan.
+
+    Parameters
+    ----------
+    suggestions: list[dict]
+        A list of dictionaries, each containing the parameterization of a point to evaluate.
+        The "_id" key is optional and can be used to identify each suggestion. It is suggested
+        to add "_id" values to the run metadata for later identification of the acquired data.
+    actuators: Sequence[Actuator]
+        The actuators to move and the inputs to move them to.
+    sensors: Sequence[Sensor]
+        The sensors that produce data to evaluate.
+    per_step: bp.PerStep | None, optional
+        The plan to execute for each step of the scan.
+    **kwargs: Any
+        Additional keyword arguments to pass to the list_scan plan.
+
+    Returns
+    -------
+    str
+        The UID of the Bluesky run.
+
+    See Also
+    --------
+    bluesky.plans.list_scan : The Bluesky plan to acquire data.
+    """
+    if sensors is None:
+        sensors = []
+    readables = [s for s in sensors if isinstance(s, Readable)]
+    if len(readables) != len(sensors):
+        logger.warning(f"Some sensors are not readable and will be ignored. 
Using only the readable sensors: {readables}")
+
+    if len(suggestions) > 1:
+        if all(isinstance(actuator, Readable) for actuator in actuators):
+            current_position = yield from seq_read(cast(Sequence[Readable], actuators))
+        else:
+            current_position = None
+        suggestions = route_suggestions(suggestions, starting_position=current_position)
+
+    md = {"blop_suggestions": suggestions, "run_key": _DEFAULT_ACQUIRE_RUN_KEY}
+    plan_args = _unpack_for_list_scan(suggestions, actuators)
+    return (
+        # TODO: fix argument type in bluesky.plans.list_scan
+        yield from bpp.set_run_key_wrapper(
+            bp.list_scan(
+                readables,
+                *plan_args,  # type: ignore[arg-type]
+                per_step=per_step,
+                md=md,
+                **kwargs,
+            ),
+            _DEFAULT_ACQUIRE_RUN_KEY,
+        )
+    )
+
+
+@plan
+def optimize_step(
+    optimization_problem: OptimizationProblem,
+    n_points: int = 1,
+    *args: Any,
+    **kwargs: Any,
+) -> MsgGenerator[tuple[str, list[dict], list[dict]]]:
+    """
+    A single step of the optimization loop.
+
+    Parameters
+    ----------
+    optimization_problem : OptimizationProblem
+        The optimization problem to solve.
+    n_points : int, optional
+        The number of points to suggest.
+
+    Returns
+    -------
+    tuple[str, list[dict], list[dict]]
+        A tuple containing the Bluesky run UID, the suggestions, and the outcomes of the step.
+    """
+    if optimization_problem.acquisition_plan is None:
+        acquisition_plan = default_acquire
+    else:
+        acquisition_plan = optimization_problem.acquisition_plan
+    optimizer = optimization_problem.optimizer
+    actuators = optimization_problem.actuators
+    suggestions = optimizer.suggest(n_points)
+    if any(ID_KEY not in suggestion for suggestion in suggestions):
+        raise ValueError(
+            f"All suggestions must contain an '{ID_KEY}' key to later match with the outcomes. Please review your "
+            f"optimizer implementation. 
Got suggestions: {suggestions}" + ) + + uid = yield from acquisition_plan(suggestions, actuators, optimization_problem.sensors, *args, **kwargs) + outcomes = optimization_problem.evaluation_function(uid, suggestions) + if any(ID_KEY not in outcome for outcome in outcomes): + raise ValueError( + f"All outcomes must contain an '{ID_KEY}' key that matches with the suggestions. Please review your " + f"evaluation function. Got suggestions: {suggestions} and outcomes: {outcomes}" + ) + optimizer.ingest(outcomes) + + return uid, suggestions, outcomes + + +def _maybe_checkpoint(optimizer: Optimizer, checkpoint_interval: int | None, iteration: int) -> None: + """Helper function to maybe create a checkpoint of the optimizer state at a given interval and iteration.""" + if checkpoint_interval and (iteration + 1) % checkpoint_interval == 0: + if not isinstance(optimizer, Checkpointable): + raise ValueError( + "The optimizer is not checkpointable. Please review your optimizer configuration or implementation." + ) + optimizer.checkpoint() + + +@plan +def optimize( + optimization_problem: OptimizationProblem, + iterations: int = 1, + n_points: int = 1, + checkpoint_interval: int | None = None, + readable_cache: dict[str, InferredReadable] | None = None, + **kwargs: Any, +) -> MsgGenerator[None]: + """ + A plan to solve the optimization problem. + + Parameters + ---------- + optimization_problem : OptimizationProblem + The optimization problem to solve. + iterations : int, optional + The number of optimization iterations to run. + n_points : int, optional + The number of points to suggest per iteration. + checkpoint_interval : int | None, optional + The number of iterations between optimizer checkpoints. If None, checkpoints + will not be saved. Optimizer must implement the + :class:`blop.protocols.Checkpointable` protocol. + readable_cache: dict[str, InferredReadable] | None = None + Cache of readable objects to store the suggestions and outcomes as events. 
+ If None, a new cache will be created. + **kwargs : Any + Additional keyword arguments to pass to the :func:`optimize_step` plan. + + See Also + -------- + blop.protocols.OptimizationProblem : The problem to solve. + blop.protocols.Checkpointable : The protocol for checkpointable objects. + optimize_step : The plan to execute a single step of the optimization. + """ + + # Cache to track readables created from suggestions and outcomes + readable_cache = readable_cache or {} + + _md = collect_optimization_metadata(optimization_problem) + _md.update( + { + "plan_name": "optimize", + "iterations": iterations, + "n_points": n_points, + "checkpoint_interval": checkpoint_interval, + "run_key": _OPTIMIZE_RUN_KEY, + } + ) + + # Encapsulate the optimization plan in a run decorator + @bpp.set_run_key_decorator(_OPTIMIZE_RUN_KEY) + @bpp.run_decorator(md=_md) + def _optimize() -> MsgGenerator[None]: + for i in range(iterations): + # Perform a single step of the optimization + uid, suggestions, outcomes = yield from optimize_step(optimization_problem, n_points, **kwargs) + + # Read the optimization step into the Bluesky and emit events for each suggestion and outcome + yield from read_step(uid, suggestions, outcomes, n_points, readable_cache) + + # Possibly take a checkpoint of the optimizer state + _maybe_checkpoint(optimization_problem.optimizer, checkpoint_interval, i) + + # Start the optimization run + return (yield from _optimize()) + + +@plan +def sample_suggestions( + optimization_problem: OptimizationProblem, + suggestions: list[dict], + readable_cache: dict[str, InferredReadable] | None = None, + **kwargs: Any, +) -> MsgGenerator[tuple[str, list[dict], list[dict]]]: + """ + Evaluate specific parameter combinations. + + This plan acquires data for given suggestions and ingests results into the optimizer. + Supports both optimizer-generated suggestions (with "_id") and manual points + (without "_id", if optimizer implements CanRegisterSuggestions). 
+ + Parameters + ---------- + optimization_problem : OptimizationProblem + The optimization problem. + suggestions : list[dict] + Parameter combinations to evaluate. Can be: + + - Optimizer suggestions (with "_id" keys from suggest()) + - Manual points (without "_id", requires CanRegisterSuggestions protocol) + + readable_cache : dict[str, InferredReadable] | None + Cache for storing suggestions/outcomes as events. + **kwargs : Any + Additional arguments for acquisition plan. + + Returns + ------- + uid : str + Bluesky run UID. + suggestions : list[dict] + Suggestions with "_id" keys. + outcomes : list[dict] + Evaluated outcomes. + + Raises + ------ + ValueError + If suggestions lack "_id" and optimizer doesn't implement CanRegisterSuggestions. + + See Also + -------- + optimize_step : Standard optimizer-driven step. + blop.protocols.CanRegisterSuggestions : Protocol for manual suggestions. + """ + + # Ensure the suggestions have an ID_KEY or register them with the optimizer + if not isinstance(optimization_problem.optimizer, CanRegisterSuggestions) and any( + ID_KEY not in suggestion for suggestion in suggestions + ): + raise ValueError( + f"All suggestions must contain an '{ID_KEY}' key to later match with the outcomes or your optimizer must " + "implement the `blop.protocols.CanRegisterSuggestions` protocol. Please review your optimizer " + f"implementation. 
Got suggestions: {suggestions}" + ) + elif isinstance(optimization_problem.optimizer, CanRegisterSuggestions): + suggestions = optimization_problem.optimizer.register_suggestions(suggestions) + + # Collect the metadata for the run + _md = collect_optimization_metadata(optimization_problem) + _md.update( + { + "plan_name": "sample_suggestions", + "suggestions": suggestions, + "run_key": _SAMPLE_SUGGESTIONS_RUN_KEY, + } + ) + + @bpp.set_run_key_decorator(_SAMPLE_SUGGESTIONS_RUN_KEY) + @bpp.run_decorator(md=_md) + def _inner_sample_suggestions() -> MsgGenerator[tuple[str, list[dict], list[dict]]]: + + # Acquire data, evaluate, and ingest outcomes + if optimization_problem.acquisition_plan is None: + acquisition_plan = default_acquire + else: + acquisition_plan = optimization_problem.acquisition_plan + uid = yield from acquisition_plan( + suggestions, optimization_problem.actuators, optimization_problem.sensors, **kwargs + ) + outcomes = optimization_problem.evaluation_function(uid, suggestions) + optimization_problem.optimizer.ingest(outcomes) + + # Emit a Bluesky event + yield from read_step(uid, suggestions, outcomes, len(suggestions), readable_cache or {}) + + return uid, suggestions, outcomes + + return (yield from _inner_sample_suggestions()) + + +@plan +def seq_read(readables: Sequence[Readable], **kwargs: Any) -> MsgGenerator[dict[str, Any]]: + """ + Read the current values of the given readables. + + Parameters + ---------- + readables : Sequence[Readable] + The readables to read. + + Returns + ------- + dict[str, Any] + A dictionary of the readable names and their current values. + """ + results = {} + for readable in readables: + results[readable.name] = yield from bps.rd(readable, **kwargs) + return results + + +def acquire_baseline( + optimization_problem: OptimizationProblem, + parameterization: dict[str, Any] | None = None, + **kwargs: Any, +) -> MsgGenerator[None]: + """ + Acquire a baseline reading. Useful for relative outcome constraints. 
+ + Parameters + ---------- + optimization_problem : OptimizationProblem + The optimization problem to solve. + parameterization : dict[str, Any] | None = None + Move the DOFs to the given parameterization, if provided. + + See Also + -------- + default_acquire : The default plan to acquire data. + """ + actuators = optimization_problem.actuators + if parameterization is None: + if all(isinstance(actuator, Readable) for actuator in actuators): + parameterization = yield from seq_read(cast(Sequence[Readable], actuators)) + else: + raise ValueError( + "All actuators must also implement the Readable protocol to acquire a baseline from current positions." + ) + if ID_KEY not in parameterization: + parameterization[ID_KEY] = "baseline" + optimizer = optimization_problem.optimizer + if optimization_problem.acquisition_plan is None: + acquisition_plan = default_acquire + else: + acquisition_plan = optimization_problem.acquisition_plan + uid = yield from acquisition_plan([parameterization], actuators, optimization_problem.sensors, **kwargs) + outcome = optimization_problem.evaluation_function(uid, [parameterization])[0] + data = {**outcome, **parameterization} + optimizer.ingest([data]) diff --git a/src/blop/plans/__init__.py b/src/blop/plans/__init__.py deleted file mode 100644 index fc196814..00000000 --- a/src/blop/plans/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .plans import ( - acquire_baseline, - acquire_with_background, - default_acquire, - optimize, - optimize_step, - per_step_background_read, - read, - take_reading_with_background, -) -from .utils import get_route_index, route_suggestions - -__all__ = [ - "acquire_baseline", - "acquire_with_background", - "default_acquire", - "get_route_index", - "optimize", - "optimize_step", - "per_step_background_read", - "read", - "route_suggestions", - "take_reading_with_background", -] diff --git a/src/blop/plans/plans.py b/src/blop/plans/plans.py deleted file mode 100644 index 3918ecd2..00000000 --- 
a/src/blop/plans/plans.py +++ /dev/null @@ -1,492 +0,0 @@ -import functools -import logging -from collections import defaultdict -from collections.abc import Callable, Mapping, Sequence -from typing import Any, Literal, cast - -import bluesky.plan_stubs as bps -import bluesky.plans as bp -import bluesky.preprocessors as bpp -import numpy as np -from bluesky.protocols import Readable, Reading -from bluesky.utils import MsgGenerator, plan - -from ..protocols import ID_KEY, Actuator, Checkpointable, OptimizationProblem, Optimizer, Sensor -from .utils import InferredReadable, route_suggestions - -logger = logging.getLogger(__name__) - -_BLUESKY_UID_KEY: Literal["bluesky_uid"] = "bluesky_uid" -_SUGGESTION_IDS_KEY: Literal["suggestion_ids"] = "suggestion_ids" -_DEFAULT_ACQUIRE_RUN_KEY: Literal["default_acquire"] = "default_acquire" -_OPTIMIZE_RUN_KEY: Literal["optimize"] = "optimize" - - -def _unpack_for_list_scan(suggestions: list[dict], actuators: Sequence[Actuator]) -> list[Any]: - """Unpack the actuators and inputs into Bluesky list_scan plan arguments.""" - actuators_and_inputs = {actuator: [suggestion[actuator.name] for suggestion in suggestions] for actuator in actuators} - unpacked_list = [] - for actuator, values in actuators_and_inputs.items(): - unpacked_list.append(actuator) - unpacked_list.append(values) - - return unpacked_list - - -@plan -def default_acquire( - suggestions: list[dict], - actuators: Sequence[Actuator], - sensors: Sequence[Sensor] | None = None, - *, - per_step: bp.PerStep | None = None, - **kwargs: Any, -) -> MsgGenerator[str]: - """ - A default plan to acquire data for optimization. Simply a list scan. - - Includes a default metadata key "blop_suggestion_ids" which can be used to identify - the suggestions that were acquired for each step of the scan. - - Parameters - ---------- - suggestions: list[dict] - A list of dictionaries, each containing the parameterization of a point to evaluate. 
- The "_id" key is optional and can be used to identify each suggestion. It is suggested - to add "_id" values to the run metadata for later identification of the acquired data. - actuators: Sequence[Actuator] - The actuators to move and the inputs to move them to. - sensors: Sequence[Sensor] - The sensors that produce data to evaluate. - per_step: bp.PerStep | None, optional - The plan to execute for each step of the scan. - **kwargs: Any - Additional keyword arguments to pass to the list_scan plan. - - Returns - ------- - str - The UID of the Bluesky run. - - See Also - -------- - bluesky.plans.list_scan : The Bluesky plan to acquire data. - """ - if sensors is None: - sensors = [] - readables = [s for s in sensors if isinstance(s, Readable)] - if len(readables) != len(sensors): - logger.warning(f"Some sensors are not readable and will be ignored. Using only the readable sensors: {readables}") - - if len(suggestions) > 1: - if all(isinstance(actuator, Readable) for actuator in actuators): - current_position = yield from read(cast(Sequence[Readable], actuators)) - else: - current_position = None - suggestions = route_suggestions(suggestions, starting_position=current_position) - - md = {"blop_suggestions": suggestions, "run_key": _DEFAULT_ACQUIRE_RUN_KEY} - plan_args = _unpack_for_list_scan(suggestions, actuators) - return ( - # TODO: fix argument type in bluesky.plans.list_scan - yield from bpp.set_run_key_wrapper( - bp.list_scan( - readables, - *plan_args, # type: ignore[arg-type] - per_step=per_step, - md=md, - **kwargs, - ), - _DEFAULT_ACQUIRE_RUN_KEY, - ) - ) - - -@plan -def optimize_step( - optimization_problem: OptimizationProblem, - n_points: int = 1, - *args: Any, - **kwargs: Any, -) -> MsgGenerator[tuple[str, list[dict], list[dict]]]: - """ - A single step of the optimization loop. - - Parameters - ---------- - optimization_problem : OptimizationProblem - The optimization problem to solve. - n_points : int, optional - The number of points to suggest. 
- - Returns - ------- - tuple[list[dict], list[dict]] - A tuple containing the suggestions and outcomes of the step. - """ - if optimization_problem.acquisition_plan is None: - acquisition_plan = default_acquire - else: - acquisition_plan = optimization_problem.acquisition_plan - optimizer = optimization_problem.optimizer - actuators = optimization_problem.actuators - suggestions = optimizer.suggest(n_points) - if any(ID_KEY not in suggestion for suggestion in suggestions): - raise ValueError( - f"All suggestions must contain an '{ID_KEY}' key to later match with the outcomes. Please review your " - f"optimizer implementation. Got suggestions: {suggestions}" - ) - - uid = yield from acquisition_plan(suggestions, actuators, optimization_problem.sensors, *args, **kwargs) - outcomes = optimization_problem.evaluation_function(uid, suggestions) - if any(ID_KEY not in outcome for outcome in outcomes): - raise ValueError( - f"All outcomes must contain an '{ID_KEY}' key that matches with the suggestions. Please review your " - f"evaluation function. Got suggestions: {suggestions} and outcomes: {outcomes}" - ) - optimizer.ingest(outcomes) - - return uid, suggestions, outcomes - - -def _maybe_checkpoint(optimizer: Optimizer, checkpoint_interval: int | None, iteration: int) -> None: - """Helper function to maybe create a checkpoint of the optimizer state at a given interval and iteration.""" - if checkpoint_interval and (iteration + 1) % checkpoint_interval == 0: - if not isinstance(optimizer, Checkpointable): - raise ValueError( - "The optimizer is not checkpointable. Please review your optimizer configuration or implementation." - ) - optimizer.checkpoint() - - -@plan -def _read_step( - uid: str, suggestions: list[dict], outcomes: list[dict], n_points: int, readable_cache: dict[str, InferredReadable] -) -> MsgGenerator[None]: - """Helper plan to read the suggestions and outcomes of a single optimization step. 
- - If fewer suggestions are returned than n_points arrays are padded to n_points length - with np.nan to ensure consistent shapes for event-model specification. - - Parameters - ---------- - uid : str - The Bluesky run UID from the acquisition plan. - suggestions : list[dict] - List of suggestion dictionaries, each containing an ID_KEY. - outcomes : list[dict] - List of outcome dictionaries, each containing an ID_KEY matching suggestions. - n_points : int - Expected number of suggestions. Arrays will be padded to this length if needed. - readable_cache : dict[str, InferredReadable] - Cache of InferredReadable objects to reuse across iterations. - """ - # Group by ID_KEY to get proper suggestion/outcome order - suggestion_by_id = {} - outcome_by_id = {} - for suggestion in suggestions: - suggestion_copy = suggestion.copy() - key = str(suggestion_copy.pop(ID_KEY)) - suggestion_by_id[key] = suggestion_copy - for outcome in outcomes: - outcome_copy = outcome.copy() - key = str(outcome_copy.pop(ID_KEY)) - outcome_by_id[key] = outcome_copy - sids = {str(sid) for sid in suggestion_by_id.keys()} - if sids != set(outcome_by_id.keys()): - raise ValueError( - "The suggestions and outcomes must contain the same IDs. 
Got suggestions: " - f"{set(suggestion_by_id.keys())} and outcomes: {set(outcome_by_id.keys())}" - ) - - # Flatten the suggestions and outcomes into a single dictionary of lists - suggestions_flat: dict[str, list[Any]] = defaultdict(list) - outcomes_flat: dict[str, list[Any]] = defaultdict(list) - # Sort for deterministic ordering, not strictly necessary - sorted_sids = sorted(sids) - for key in sorted_sids: - for name, value in suggestion_by_id[key].items(): - suggestions_flat[name].append(value) - for name, value in outcome_by_id[key].items(): - outcomes_flat[name].append(value) - - # Pad arrays to n_points if suggestions had fewer trials than expected - # TODO: Use awkward-array to handle this in the future - actual_n = len(sorted_sids) - if actual_n < n_points: - # Pad suggestion arrays with NaN - for name in suggestions_flat: - suggestions_flat[name].extend([np.nan] * (n_points - actual_n)) - # Pad outcome arrays with NaN - for name in outcomes_flat: - outcomes_flat[name].extend([np.nan] * (n_points - actual_n)) - # Pad suggestion IDs with empty string to maintain string dtype - sorted_sids.extend([""] * (n_points - actual_n)) - - # Create or update the InferredReadables for the suggestion_ids, step uid, suggestions, and outcomes - if _SUGGESTION_IDS_KEY not in readable_cache: - readable_cache[_SUGGESTION_IDS_KEY] = InferredReadable(_SUGGESTION_IDS_KEY, initial_value=sorted_sids) - else: - readable_cache[_SUGGESTION_IDS_KEY].update(sorted_sids) - if _BLUESKY_UID_KEY not in readable_cache: - readable_cache[_BLUESKY_UID_KEY] = InferredReadable(_BLUESKY_UID_KEY, initial_value=uid) - else: - readable_cache[_BLUESKY_UID_KEY].update(uid) - for name, value in suggestions_flat.items(): - if name not in readable_cache: - readable_cache[name] = InferredReadable(name, initial_value=value) - else: - readable_cache[name].update(value) - for name, value in outcomes_flat.items(): - if name not in readable_cache: - readable_cache[name] = InferredReadable(name, 
initial_value=value) - else: - readable_cache[name].update(value) - - # Read and save to produce a single event - yield from bps.trigger_and_read(list(readable_cache.values())) - - -@plan -def optimize( - optimization_problem: OptimizationProblem, - iterations: int = 1, - n_points: int = 1, - checkpoint_interval: int | None = None, - *args: Any, - **kwargs: Any, -) -> MsgGenerator[None]: - """ - A plan to solve the optimization problem. - - Parameters - ---------- - optimization_problem : OptimizationProblem - The optimization problem to solve. - iterations : int, optional - The number of optimization iterations to run. - n_points : int, optional - The number of points to suggest per iteration. - checkpoint_interval : int | None, optional - The number of iterations between optimizer checkpoints. If None, checkpoints - will not be saved. Optimizer must implement the - :class:`blop.protocols.Checkpointable` protocol. - *args : Any - Additional positional arguments to pass to the :func:`optimize_step` plan. - **kwargs : Any - Additional keyword arguments to pass to the :func:`optimize_step` plan. - - See Also - -------- - blop.protocols.OptimizationProblem : The problem to solve. - blop.protocols.Checkpointable : The protocol for checkpointable objects. - optimize_step : The plan to execute a single step of the optimization. 
- """ - - # Cache to track readables created from suggestions and outcomes - readable_cache: dict[str, InferredReadable] = {} - - # Collect metadata for this optimization run - if hasattr(optimization_problem.evaluation_function, "__name__"): - evaluation_function_name = optimization_problem.evaluation_function.__name__ # type: ignore[attr-defined] - else: - evaluation_function_name = optimization_problem.evaluation_function.__class__.__name__ - if hasattr(optimization_problem.acquisition_plan, "__name__"): - acquisition_plan_name = optimization_problem.acquisition_plan.__name__ # type: ignore[attr-defined] - else: - acquisition_plan_name = optimization_problem.acquisition_plan.__class__.__name__ - _md = { - "plan_name": "optimize", - "sensors": [sensor.name for sensor in optimization_problem.sensors], - "actuators": [actuator.name for actuator in optimization_problem.actuators], - "evaluation_function": evaluation_function_name, - "acquisition_plan": acquisition_plan_name, - "optimizer": optimization_problem.optimizer.__class__.__name__, - "iterations": iterations, - "n_points": n_points, - "checkpoint_interval": checkpoint_interval, - "run_key": _OPTIMIZE_RUN_KEY, - } - - # Encapsulate the optimization plan in a run decorator - @bpp.set_run_key_decorator(_OPTIMIZE_RUN_KEY) - @bpp.run_decorator(md=_md) - def _optimize(): - for i in range(iterations): - # Perform a single step of the optimization - uid, suggestions, outcomes = yield from optimize_step(optimization_problem, n_points, *args, **kwargs) - - # Read the optimization step into the Bluesky and emit events for each suggestion and outcome - yield from _read_step(uid, suggestions, outcomes, n_points, readable_cache) - - # Possibly take a checkpoint of the optimizer state - _maybe_checkpoint(optimization_problem.optimizer, checkpoint_interval, i) - - # Start the optimization run - return (yield from _optimize()) - - -@plan -def read(readables: Sequence[Readable], **kwargs: Any) -> MsgGenerator[dict[str, Any]]: 
- """ - Read the current values of the given readables. - - Parameters - ---------- - readables : Sequence[Readable] - The readables to read. - - Returns - ------- - dict[str, Any] - A dictionary of the readable names and their current values. - """ - results = {} - for readable in readables: - results[readable.name] = yield from bps.rd(readable, **kwargs) - return results - - -def acquire_baseline( - optimization_problem: OptimizationProblem, - parameterization: dict[str, Any] | None = None, - **kwargs: Any, -) -> MsgGenerator[None]: - """ - Acquire a baseline reading. Useful for relative outcome constraints. - - Parameters - ---------- - optimization_problem : OptimizationProblem - The optimization problem to solve. - parameterization : dict[str, Any] | None = None - Move the DOFs to the given parameterization, if provided. - - See Also - -------- - default_acquire : The default plan to acquire data. - """ - actuators = optimization_problem.actuators - if parameterization is None: - if all(isinstance(actuator, Readable) for actuator in actuators): - parameterization = yield from read(cast(Sequence[Readable], actuators)) - else: - raise ValueError( - "All actuators must also implement the Readable protocol to acquire a baseline from current positions." 
- ) - if ID_KEY not in parameterization: - parameterization[ID_KEY] = "baseline" - optimizer = optimization_problem.optimizer - if optimization_problem.acquisition_plan is None: - acquisition_plan = default_acquire - else: - acquisition_plan = optimization_problem.acquisition_plan - uid = yield from acquisition_plan([parameterization], actuators, optimization_problem.sensors, **kwargs) - outcome = optimization_problem.evaluation_function(uid, [parameterization])[0] - data = {**outcome, **parameterization} - optimizer.ingest([data]) - - -@plan -def take_reading_with_background( - readables: Sequence[Readable], - name: str = "primary", - block_beam: Callable[[], MsgGenerator[None]] | None = None, - unblock_beam: Callable[[], MsgGenerator[None]] | None = None, -) -> MsgGenerator[Mapping[str, Reading]]: - """ - Takes a reading of the readables while the beam is blocked and then again while the beam is unblocked. - - Parameters - ---------- - readables: Sequence[Readable] - The readables to read. - name: str = "primary" - The name of the reading. - block_beam: Callable[[], MsgGenerator[None]] | None = None - A callable that blocks the beam (e.g. by closing a shutter). - unblock_beam: Callable[[], MsgGenerator[None]] | None = None - A callable that unblocks the beam (e.g. by opening a shutter). - - Returns - ------- - Mapping[str, Reading] - The readings from the final trigger_and_read operation. 
- """ - if block_beam is None or unblock_beam is None: - raise ValueError("block_beam and unblock_beam plans must be provided.") - yield from block_beam() - yield from bps.trigger_and_read(readables, name=f"{name}_background") - yield from unblock_beam() - reading = yield from bps.trigger_and_read(readables, name=name) - return reading - - -def per_step_background_read( - block_beam: Callable[[], MsgGenerator[None]], unblock_beam: Callable[[], MsgGenerator[None]] -) -> bp.PerStep: - """ - Returns a per-step plan function that takes a reading of the readables while the beam is blocked and then - again while the beam is unblocked. - - Useful for downstream analysis that requires per-step background readings (e.g. background subtraction). - - Parameters - ---------- - block_beam: Callable[[], MsgGenerator[None]] - A callable that blocks the beam (e.g. by closing a shutter). - unblock_beam: Callable[[], MsgGenerator[None]] - A callable that unblocks the beam (e.g. by opening a shutter). - - See Also - -------- - bluesky.plans.one_nd_step : The Bluesky plan to execute for each step of the scan. - """ - take_reading = functools.partial(take_reading_with_background, block_beam=block_beam, unblock_beam=unblock_beam) - return functools.partial(bps.one_nd_step, take_reading=take_reading) - - -@plan -def acquire_with_background( - suggestions: list[dict], - actuators: Sequence[Actuator], - sensors: Sequence[Sensor] | None = None, - *, - block_beam: Callable[[], MsgGenerator[None]], - unblock_beam: Callable[[], MsgGenerator[None]], - **kwargs: Any, -) -> MsgGenerator[str]: - """ - A plan to acquire data for optimization with background readings. - - Parameters - ---------- - suggestions: list[dict] - A list of dictionaries, each containing the parameterization of a point to evaluate. - The "_id" key is optional and can be used to identify each suggestion. It is suggested - to add "_id" values to the run metadata for later identification of the acquired data. 
- actuators: Sequence[Actuator] - The actuators to move to their suggested positions. - sensors: Sequence[Sensor] | None = None - The sensors that produce data to evaluate. - block_beam: Callable[[], MsgGenerator[None]] - A Bluesky plan that blocks the beam (e.g. by closing a shutter). - unblock_beam: Callable[[], MsgGenerator[None]] - A Bluesky plan that unblocks the beam (e.g. by opening a shutter). - **kwargs: Any - Additional keyword arguments to pass to the acquisition plan. - - Returns - ------- - str - The UID of the Bluesky run. - - See Also - -------- - acquire : The base plan to acquire data. - per_step_background_read : The per-step plan to take background readings. - """ - if sensors is None: - sensors = [] - per_step = per_step_background_read(block_beam, unblock_beam) - return (yield from default_acquire(suggestions, actuators, sensors, per_step=per_step, **kwargs)) diff --git a/src/blop/protocols.py b/src/blop/protocols.py index 66938492..dc31f0c6 100644 --- a/src/blop/protocols.py +++ b/src/blop/protocols.py @@ -10,6 +10,31 @@ Sensor = Readable | EventCollectable | EventPageCollectable +@runtime_checkable +class CanRegisterSuggestions(Protocol): + """ + A protocol for optimizers that can register suggestions. This + allows them to add an "_id" key to the suggestions dynamically and ensure + that the suggestions are unique. + """ + + def register_suggestions(self, suggestions: list[dict]) -> list[dict]: + """ + Register the suggestions with the optimizer. + + Parameters + ---------- + suggestions: list[dict] + The suggestions to register. The "_id" key is optional and will be overwritten if present. + + Returns + ------- + list[dict] + The original suggestions with an "_id" key added. + """ + ... 
+ + @runtime_checkable class Checkpointable(Protocol): """ diff --git a/src/blop/sim/__init__.py b/src/blop/sim/__init__.py deleted file mode 100644 index 5b72eec5..00000000 --- a/src/blop/sim/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -import numpy as np -import scipy as sp - - -def get_beam_stats(image: np.ndarray, threshold: float = 0.5) -> dict[str, float | np.ndarray]: - ny, nx = image.shape - - fim = image.copy() - fim -= np.median(fim, axis=0) - fim -= np.median(fim, axis=1)[:, None] - - fim = sp.ndimage.median_filter(fim, size=3) - fim = sp.ndimage.gaussian_filter(fim, sigma=1) - - m = fim > (threshold * fim.max()) - area = m.sum() - if area == 0.0: - return { - "max": 0.0, - "sum": 0.0, - "area": 0.0, - "cen_x": 0.0, - "cen_y": 0.0, - "wid_x": 0.0, - "wid_y": 0.0, - "x_min": 0.0, - "x_max": 0.0, - "y_min": 0.0, - "y_max": 0.0, - "bbox": np.array([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]), - } - - cs_x = np.cumsum(m.sum(axis=0)) / area - cs_y = np.cumsum(m.sum(axis=1)) / area - - # q_min, q_max = [0.15865, 0.84135] # one sigma - q_min, q_max = [0.05, 0.95] # 90% - - x_min, x_max = np.interp([q_min, q_max], cs_x, np.arange(nx)) - y_min, y_max = np.interp([q_min, q_max], cs_y, np.arange(ny)) - - stats = { - "max": fim.max(), - "sum": fim.sum(), - "area": area, - "cen_x": (x_min + x_max) / 2, - "cen_y": (y_min + y_max) / 2, - "wid_x": x_max - x_min, - "wid_y": y_max - y_min, - "x_min": x_min, - "x_max": x_max, - "y_min": y_min, - "y_max": y_max, - "bbox": [[x_min, x_max, x_max, x_min, x_min], [y_min, y_min, y_max, y_max, y_min]], - } - - return stats - - -from .beamline import DatabrokerBeamline, DatabrokerDetector, TiledBeamline, TiledDetector # noqa: E402, F401 -from .handlers import HDF5Handler # noqa: E402, F401 diff --git a/src/blop/sim/beamline.py b/src/blop/sim/beamline.py deleted file mode 100644 index 2b729c57..00000000 --- a/src/blop/sim/beamline.py +++ /dev/null @@ -1,377 +0,0 @@ -import itertools -from collections import deque -from 
collections.abc import Generator, Iterator -from datetime import datetime -from pathlib import Path -from typing import Any - -import h5py # type: ignore[import-untyped] -import numpy as np -import scipy as sp # type: ignore[import-untyped] -from event_model import StreamRange, compose_resource, compose_stream_resource # type: ignore[import-untyped] -from ophyd import Component as Cpt # type: ignore[import-untyped] -from ophyd import Device, Kind, Signal # type: ignore[import-untyped] -from ophyd.sim import NullStatus, new_uid # type: ignore[import-untyped] -from ophyd.utils import make_dir_tree # type: ignore[import-untyped] - -from . import get_beam_stats -from .handlers import ExternalFileReference - - -class DatabrokerDetector(Device): - sum = Cpt(Signal, kind="hinted") - max = Cpt(Signal, kind="normal") - area = Cpt(Signal, kind="normal") - cen_x = Cpt(Signal, kind="hinted") - cen_y = Cpt(Signal, kind="hinted") - wid_x = Cpt(Signal, kind="hinted") - wid_y = Cpt(Signal, kind="hinted") - image = Cpt(ExternalFileReference, kind="normal") - image_shape = Cpt(Signal, value=(300, 400), kind="normal") - noise = Cpt(Signal, kind="normal") - - def __init__( - self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args: Any, **kwargs: Any - ) -> None: - super().__init__(*args, **kwargs) - - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. 
- self._asset_docs_cache: deque[tuple[str, dict[str, Any]]] = deque() - self._resource_document: dict[str, Any] | None = None - self._datum_factory: Any | None = None - self._dataset: h5py.Dataset | None = None - self._h5file_desc: h5py.File | None = None - self._counter: Iterator[int] | None = None - - self.noise.put(noise) - - def trigger(self) -> NullStatus: - if not self._counter: - raise RuntimeError("Counter not initialized, make sure to call stage() first.") - if not self._dataset: - raise RuntimeError("Dataset not initialized, make sure to call stage() first.") - if not self._datum_factory: - raise RuntimeError("Datum factory not initialized, make sure to call stage() first.") - super().trigger() - raw_image = self.generate_beam(noise=self.noise.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - self._dataset[current_frame, :, :] = raw_image - - datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) - self._asset_docs_cache.append(("datum", datum_document)) - - stats = get_beam_stats(raw_image) - self.image.put(datum_document["datum_id"]) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - super().trigger() - - return NullStatus() - - def stage(self) -> list[Any]: - devices = super().stage() - date = datetime.now() - self._assets_dir = date.strftime("%Y/%m/%d") - data_file = f"{new_uid()}.h5" - - self._resource_document, self._datum_factory, _ = compose_resource( - start={"uid": "needed for compose_resource() but will be discarded"}, - spec="HDF5", - root=self._root_dir, - resource_path=str(Path(self._assets_dir) / Path(data_file)), - resource_kwargs={}, - ) - - if not self._resource_document: - raise RuntimeError("Resource document not initialized.") - - self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) - - # now discard the start uid, a real one will 
be added later - self._resource_document.pop("run_start") - self._asset_docs_cache.append(("resource", self._resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), - maxshape=(None, *self.image_shape.get()), - chunks=(1, *self.image_shape.get()), - dtype="float64", - compression="lzf", - ) - self._counter = itertools.count() - return devices - - def unstage(self) -> list[Any]: - devices = super().unstage() - del self._dataset - if self._h5file_desc: - self._h5file_desc.close() - self._resource_document = None - self._datum_factory = None - return devices - - def collect_asset_docs(self) -> Generator[tuple[str, dict[str, Any]], None, None]: - items = list(self._asset_docs_cache) - self._asset_docs_cache.clear() - yield from items - - def generate_beam(self, noise: bool = True) -> np.ndarray: - nx, ny = self.image_shape.get() - - x = np.linspace(-10, 10, ny) - y = np.linspace(-10, 10, nx) - X, Y = np.meshgrid(x, y) - - x0 = self.parent.kbh_ush.get() - self.parent.kbh_dsh.get() - y0 = self.parent.kbv_usv.get() - self.parent.kbv_dsv.get() - x_width = np.sqrt(0.2 + 5e-1 * (self.parent.kbh_ush.get() + self.parent.kbh_dsh.get() - 1) ** 2) - y_width = np.sqrt(0.1 + 5e-1 * (self.parent.kbv_usv.get() + self.parent.kbv_dsv.get() - 2) ** 2) - - beam = np.exp(-0.5 * (((X - x0) / x_width) ** 4 + ((Y - y0) / y_width) ** 4)) / ( - np.sqrt(2 * np.pi) * x_width * y_width - ) - - mask = X > self.parent.ssa_inboard.get() - mask &= X < self.parent.ssa_outboard.get() - mask &= Y > self.parent.ssa_lower.get() - mask &= Y < self.parent.ssa_upper.get() - mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) - - image = beam * mask - - if noise: - kx = np.fft.fftfreq(n=len(x), d=0.1) - ky = np.fft.fftfreq(n=len(y), d=0.1) - KX, KY = np.meshgrid(kx, ky) - - power_spectrum = 1 / (1e-2 + KX**2 
+ KY**2) - - white_noise = 1e-3 * np.random.standard_normal(size=X.shape) - pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) - # background = 5e-3 * (X - Y) / X.max() - - image += white_noise + pink_noise - - return image - - -class TiledDetector(Device): - sum = Cpt(Signal, kind=Kind.hinted) - max = Cpt(Signal, kind=Kind.normal) - area = Cpt(Signal, kind=Kind.normal) - cen_x = Cpt(Signal, kind=Kind.hinted) - cen_y = Cpt(Signal, kind=Kind.hinted) - wid_x = Cpt(Signal, kind=Kind.hinted) - wid_y = Cpt(Signal, kind=Kind.hinted) - image = Cpt(ExternalFileReference, kind=Kind.omitted) - image_shape = Cpt(Signal, value=(300, 400), kind=Kind.omitted) - noise = Cpt(Signal, kind=Kind.normal) - - def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args: Any, **kwargs: Any): - super().__init__(*args, **kwargs) - - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. 
- self._asset_docs_cache: deque[tuple[str, dict[str, Any]]] = deque() - self._stream_resource_document: dict[str, Any] | None = None - self._stream_datum_factory: Any | None = None - self._dataset: h5py.Dataset | None = None - self._counter: Iterator[int] | None = None - self.noise.put(noise) - - def trigger(self): - if not self._counter: - raise RuntimeError("Counter not initialized, make sure to call stage() first.") - if not self._dataset: - raise RuntimeError("Dataset not initialized, make sure to call stage() first.") - if not self._stream_datum_factory: - raise RuntimeError("Datum factory not initialized, make sure to call stage() first.") - super().trigger() - raw_image = self.generate_beam(noise=self.noise.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - - self._dataset[current_frame, :, :] = raw_image - - stream_datum_document = self._stream_datum_factory( - StreamRange(start=current_frame, stop=current_frame + 1), - ) - self._asset_docs_cache.append(("stream_datum", stream_datum_document)) - - stats = get_beam_stats(raw_image) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - super().trigger() - - return NullStatus() - - def _generate_file_path(self, date_template="%Y/%m/%d"): - date = datetime.now() - assets_dir = date.strftime(date_template) - data_file = f"{new_uid()}.h5" - - return Path(self._root_dir) / Path(assets_dir) / Path(data_file) - - def stage(self): - devices = super().stage() - - self._asset_docs_cache.clear() - full_path = self._generate_file_path() - image_shape = self.image_shape.get() - - uri = f"file://localhost/{str(full_path).strip('/')}" - - ( - self._stream_resource_document, - self._stream_datum_factory, - ) = compose_stream_resource( - mimetype="application/x-hdf5", - uri=uri, - data_key=self.image.name, - parameters={ - "chunk_shape": (1, *image_shape), - "dataset": "/entry/image", - }, - ) - - 
self._data_file = full_path - - self._asset_docs_cache.append(("stream_resource", self._stream_resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *image_shape)), - maxshape=(None, *image_shape), - chunks=(1, *image_shape), - dtype="float64", - compression="lzf", - ) - self._counter = itertools.count() - return devices - - def unstage(self) -> list[Any]: - devices = super().unstage() - del self._dataset - if self._h5file_desc: - self._h5file_desc.close() - self._resource_document = None - self._datum_factory = None - return devices - - def describe(self): - res = super().describe() - res[self.image.name] = { - "shape": [1, *self.image_shape.get()], - "external": "STREAM:", - "source": "sim", - "dtype": "array", - "dtype_numpy": np.dtype(np.float64).str, - } # self.parent.ssa_inboard.get() - mask &= X < self.parent.ssa_outboard.get() - mask &= Y > self.parent.ssa_lower.get() - mask &= Y < self.parent.ssa_upper.get() - mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) - - image = beam * mask - - if noise: - kx = np.fft.fftfreq(n=len(x), d=0.1) - ky = np.fft.fftfreq(n=len(y), d=0.1) - KX, KY = np.meshgrid(kx, ky) - - power_spectrum = 1 / (1e-2 + KX**2 + KY**2) - - white_noise = 1e-3 * np.random.standard_normal(size=X.shape) - pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) - # background = 5e-3 * (X - Y) / X.max() - - image += white_noise + pink_noise - - return image - - -class DatabrokerBeamline(Device): - det = Cpt(DatabrokerDetector) - - kbh_ush = Cpt(Signal, kind=Kind.hinted) - kbh_dsh = Cpt(Signal, kind=Kind.hinted) - kbv_usv = Cpt(Signal, kind=Kind.hinted) - kbv_dsv = Cpt(Signal, kind=Kind.hinted) - - ssa_inboard = Cpt(Signal, value=-5.0, kind=Kind.hinted) - ssa_outboard = Cpt(Signal, value=5.0, kind=Kind.hinted) - 
ssa_lower = Cpt(Signal, value=-5.0, kind=Kind.hinted) - ssa_upper = Cpt(Signal, value=5.0, kind=Kind.hinted) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - -class TiledBeamline(Device): - det = Cpt(TiledDetector) - - kbh_ush = Cpt(Signal, kind=Kind.hinted) - kbh_dsh = Cpt(Signal, kind=Kind.hinted) - kbv_usv = Cpt(Signal, kind=Kind.hinted) - kbv_dsv = Cpt(Signal, kind=Kind.hinted) - - ssa_inboard = Cpt(Signal, value=-5.0, kind=Kind.hinted) - ssa_outboard = Cpt(Signal, value=5.0, kind=Kind.hinted) - ssa_lower = Cpt(Signal, value=-5.0, kind=Kind.hinted) - ssa_upper = Cpt(Signal, value=5.0, kind=Kind.hinted) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) diff --git a/src/blop/sim/handlers.py b/src/blop/sim/handlers.py deleted file mode 100644 index 2afde316..00000000 --- a/src/blop/sim/handlers.py +++ /dev/null @@ -1,31 +0,0 @@ -import h5py # type: ignore[import-untyped] -from area_detector_handlers.handlers import HandlerBase # type: ignore[import-untyped] -from ophyd import Signal # type: ignore[import-untyped] - - -class HDF5Handler(HandlerBase): - specs = {"HDF5"} - - def __init__(self, filename): - self._name = filename - - def __call__(self, frame): - with h5py.File(self._name, "r") as f: - entry = f["/entry/image"] - return entry[frame] - - -class ExternalFileReference(Signal): - """ - A pure software Signal that describe()s an image in an external file. 
- """ - - def describe(self): - resource_document_data = super().describe() - resource_document_data[self.name].update( - { - "external": "FILESTORE:", - "dtype": "array", - } - ) - return resource_document_data diff --git a/src/blop/sim/xrt_beamline.py b/src/blop/sim/xrt_beamline.py deleted file mode 100644 index 03a0f269..00000000 --- a/src/blop/sim/xrt_beamline.py +++ /dev/null @@ -1,660 +0,0 @@ -import itertools -import time -from collections import deque -from datetime import datetime -from pathlib import Path - -import h5py -import matplotlib as mpl -import numpy as np -import scipy as sp -from event_model import StreamRange, compose_resource, compose_stream_resource -from ophyd import Any, Device, EpicsSignal, Kind, Signal -from ophyd import Component as Cpt -from ophyd.sim import NullStatus, new_uid -from ophyd.utils import make_dir_tree - -from . import get_beam_stats -from .handlers import ExternalFileReference -from .xrt_kb_model import build_beamline, build_histRGB, run_process - -TEST = False - - -class DatabrokerxrtEpicsScreen(Device): - sum = Cpt(Signal, kind=Kind.hinted) - max = Cpt(Signal, kind=Kind.normal) - area = Cpt(Signal, kind=Kind.normal) - cen_x = Cpt(Signal, kind="hinted") - cen_y = Cpt(Signal, kind="hinted") - wid_x = Cpt(Signal, kind="hinted") - wid_y = Cpt(Signal, kind="hinted") - image = Cpt(EpicsSignal, "BL:Screen1:Array", kind="normal") - acquire = Cpt(EpicsSignal, "BL:Screen1:Acquire", kind="normal") - image_shape = Cpt(Signal, value=(300, 400), kind="normal") - noise = Cpt(Signal, kind="normal") - - def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. 
- self._asset_docs_cache = deque() - self._resource_document = None - self._datum_factory = None - super().__init__(*args, **kwargs) - - def trigger(self): - super().trigger() - self.acquire.put(1) - while self.acquire.get() > 0: - time.sleep(0.01) - raw_image = self.image.get() - image = raw_image.reshape(*self.image_shape.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - - self._dataset[current_frame, :, :] = image - - datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) - self._asset_docs_cache.append(("datum", datum_document)) - - stats = get_beam_stats(image) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - return NullStatus() - - def stage(self): - super().stage() - date = datetime.now() - self._assets_dir = date.strftime("%Y/%m/%d") - data_file = f"{new_uid()}.h5" - - self._resource_document, self._datum_factory, _ = compose_resource( - start={"uid": "needed for compose_resource() but will be discarded"}, - spec="HDF5", - root=self._root_dir, - resource_path=str(Path(self._assets_dir) / Path(data_file)), - resource_kwargs={}, - ) - - self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) - - # now discard the start uid, a real one will be added later - self._resource_document.pop("run_start") - self._asset_docs_cache.append(("resource", self._resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), - maxshape=(None, *self.image_shape.get()), - chunks=(1, *self.image_shape.get()), - dtype="float64", - compression="lzf", - ) - self._counter = itertools.count() - - def unstage(self): - super().unstage() - del self._dataset - self._h5file_desc.close() - 
self._resource_document = None - self._datum_factory = None - - -class DatabrokerDetector(Device): - sum = Cpt(Signal, kind=Kind.hinted) - max = Cpt(Signal, kind=Kind.normal) - area = Cpt(Signal, kind=Kind.normal) - cen_x = Cpt(Signal, kind=Kind.hinted) - cen_y = Cpt(Signal, kind=Kind.hinted) - wid_x = Cpt(Signal, kind=Kind.hinted) - wid_y = Cpt(Signal, kind=Kind.hinted) - image = Cpt(ExternalFileReference, kind=Kind.normal) - image_shape = Cpt(Signal, value=(300, 400), kind=Kind.normal) - noise = Cpt(Signal, kind=Kind.normal) - - def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): - super().__init__(*args, **kwargs) - - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. - self._asset_docs_cache = deque() - self._resource_document = None - self._datum_factory = None - self.noise.put(noise) - self.limits = [[-0.6, 0.6], [-0.45, 0.45]] - if TEST: - self.mplFig = mpl.figure.Figure() - self.mplFig.subplots_adjust(left=0.15, bottom=0.15, top=0.92) - self.mplAx = self.mplFig.add_subplot(111) - - xv = np.random.rand(400, 300) - self.im = self.mplAx.imshow( - xv.T, - aspect="auto", - origin="lower", - vmin=0, - vmax=1e3, - cmap="jet", - extent=(self.limits[0][0], self.limits[0][1], self.limits[1][0], self.limits[1][1]), - ) - self.counter = 0 - self.beamLine = build_beamline() - - def trigger(self): - super().trigger() - raw_image = self.generate_beam(noise=self.noise.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - - self._dataset[current_frame, :, :] = raw_image - - datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) - self._asset_docs_cache.append(("datum", datum_document)) - - stats = get_beam_stats(raw_image) - 
self.image.put(datum_document["datum_id"]) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - return NullStatus() - - def stage(self): - super().stage() - date = datetime.now() - self._assets_dir = date.strftime("%Y/%m/%d") - data_file = f"{new_uid()}.h5" - - self._resource_document, self._datum_factory, _ = compose_resource( - start={"uid": "needed for compose_resource() but will be discarded"}, - spec="HDF5", - root=self._root_dir, - resource_path=str(Path(self._assets_dir) / Path(data_file)), - resource_kwargs={}, - ) - - self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) - - # now discard the start uid, a real one will be added later - self._resource_document.pop("run_start") - self._asset_docs_cache.append(("resource", self._resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), - maxshape=(None, *self.image_shape.get()), - chunks=(1, *self.image_shape.get()), - dtype="float64", - compression="lzf", - ) - self._counter = itertools.count() - - def unstage(self): - super().unstage() - del self._dataset - self._h5file_desc.close() - self._resource_document = None - self._datum_factory = None - - def collect_asset_docs(self): - items = list(self._asset_docs_cache) - self._asset_docs_cache.clear() - yield from items - - def generate_beam_func(self, noise: bool = True): - nx, ny = self.image_shape.get() - - x = np.linspace(-10, 10, ny) - y = np.linspace(-10, 10, nx) - X, Y = np.meshgrid(x, y) - - x0 = self.parent.kbh_ush.get() - self.parent.kbh_dsh.get() - y0 = self.parent.kbv_usv.get() - self.parent.kbv_dsv.get() - x_width = np.sqrt(0.2 + 5e-1 * (self.parent.kbh_ush.get() + self.parent.kbh_dsh.get() - 1) ** 2) - y_width = np.sqrt(0.1 + 5e-1 * 
(self.parent.kbv_usv.get() + self.parent.kbv_dsv.get() - 2) ** 2) - - beam = np.exp(-0.5 * (((X - x0) / x_width) ** 4 + ((Y - y0) / y_width) ** 4)) / ( - np.sqrt(2 * np.pi) * x_width * y_width - ) - - mask = X > self.parent.ssa_inboard.get() - mask &= X < self.parent.ssa_outboard.get() - mask &= Y > self.parent.ssa_lower.get() - mask &= Y < self.parent.ssa_upper.get() - mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) - - image = beam * mask - - if noise: - kx = np.fft.fftfreq(n=len(x), d=0.1) - ky = np.fft.fftfreq(n=len(y), d=0.1) - KX, KY = np.meshgrid(kx, ky) - - power_spectrum = 1 / (1e-2 + KX**2 + KY**2) - - white_noise = 1e-3 * np.random.standard_normal(size=X.shape) - pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) - # background = 5e-3 * (X - Y) / X.max() - - image += white_noise + pink_noise - - return image - - def generate_beam_xrt(self, noise: bool = True): - R2 = self.parent.kbh_dsh.get() - R1 = self.parent.kbv_dsv.get() - - self.beamLine.toroidMirror01.R = R1 - self.beamLine.toroidMirror02.R = R2 - outDict = run_process(self.beamLine) - lb = outDict["screen01beamLocal01"] - - hist2d, _, _ = build_histRGB(lb, lb, limits=self.limits, isScreen=True, shape=[400, 300]) - image = hist2d - _ = np.max(image) - image += 1e-3 * np.abs(np.random.standard_normal(size=image.shape)) - self.counter += 1 - - return image - - def generate_beam(self, *args, **kwargs): - return self.generate_beam_xrt(*args, **kwargs) - - -class DatabrokerBeamlineEpics(Device): - det = Cpt(DatabrokerxrtEpicsScreen, name="DetectorScreen") - - kbh_ush = Cpt(Signal, kind="hinted") - kbh_dsh = Cpt(EpicsSignal, ":TM_HOR:R", kind="hinted") - kbv_usv = Cpt(Signal, kind="hinted") - kbv_dsv = Cpt(EpicsSignal, ":TM_VERT:R", kind="hinted") - - ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") - ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") - ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") - ssa_upper = 
Cpt(Signal, value=5.0, kind="hinted") - - def __init__(self, *args, **kwargs): - self.beamline = build_beamline() - super().__init__(*args, **kwargs) - - -class DatabrokerBeamline(Device): - det = Cpt(DatabrokerDetector) - - kbh_ush = Cpt(Signal, kind="hinted") - kbh_dsh = Cpt(Signal, kind="hinted") - kbv_usv = Cpt(Signal, kind="hinted") - kbv_dsv = Cpt(Signal, kind="hinted") - - ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") - ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") - ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") - ssa_upper = Cpt(Signal, value=5.0, kind="hinted") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - -class TiledxrtEpicsScreen(Device): - sum = Cpt(Signal, kind="hinted") - max = Cpt(Signal, kind="normal") - area = Cpt(Signal, kind="normal") - cen_x = Cpt(Signal, kind="hinted") - cen_y = Cpt(Signal, kind="hinted") - wid_x = Cpt(Signal, kind="hinted") - wid_y = Cpt(Signal, kind="hinted") - image = Cpt(EpicsSignal, "BL:Screen1:Array", kind="omitted") - acquire = Cpt(EpicsSignal, "BL:Screen1:Acquire", kind="normal") - image_shape = Cpt(Signal, value=(300, 400), kind="normal") - noise = Cpt(Signal, kind="normal") - - def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. 
- self._asset_docs_cache: deque[tuple[str, dict[str, Any]]] = deque() - self._stream_resource_document: dict[str, Any] | None = None - self._stream_datum_factory: Any | None = None - super().__init__(*args, **kwargs) - - def trigger(self): - super().trigger() - self.acquire.put(1) - while self.acquire.get() > 0: - time.sleep(0.01) - raw_image = self.image.get() - image = raw_image.reshape(*self.image_shape.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - - self._dataset[current_frame, :, :] = image - - stream_datum_document = self._stream_datum_factory( - StreamRange(start=current_frame, stop=current_frame + 1), - ) - - self._asset_docs_cache.append(("stream_datum", stream_datum_document)) - - stats = get_beam_stats(image) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - return NullStatus() - - def _generate_file_path(self, date_template="%Y/%m/%d"): - date = datetime.now() - assets_dir = date.strftime(date_template) - data_file = f"{new_uid()}.h5" - return Path(self._root_dir) / Path(assets_dir) / Path(data_file) - - def stage(self): - devices = super().unstage() - full_path = self._generate_file_path() - image_shape = self.image_shape.get() - uri = f"file://localhost/{str(full_path).strip('/')}" - ( - self._stream_resource_document, - self._stream_datum_factory, - ) = compose_stream_resource( - mimetype="application/x-hdf5", - uri=uri, - data_key=self.image.name, - parameters={ - "chunk_shape": (1, *image_shape), - "dataset": "/entry/image", - }, - ) - - self._data_file = full_path - - self._asset_docs_cache.append(("stream_resource", self._stream_resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), - maxshape=(None, 
*self.image_shape.get()), - chunks=(1, *self.image_shape.get()), - dtype="float64", - compression="lzf", - ) - self._counter = itertools.count() - return devices - - def unstage(self) -> list[Any]: - devices = super().unstage() - del self._dataset - if self._h5file_desc: - self._h5file_desc.close() - self._resource_document = None - self._datum_factory = None - return devices - - -class TiledDetector(Device): - sum = Cpt(Signal, kind=Kind.hinted) - max = Cpt(Signal, kind=Kind.normal) - area = Cpt(Signal, kind=Kind.normal) - cen_x = Cpt(Signal, kind=Kind.hinted) - cen_y = Cpt(Signal, kind=Kind.hinted) - wid_x = Cpt(Signal, kind=Kind.hinted) - wid_y = Cpt(Signal, kind=Kind.hinted) - image = Cpt(ExternalFileReference, kind=Kind.omitted) - image_shape = Cpt(Signal, value=(300, 400), kind=Kind.omitted) - noise = Cpt(Signal, kind=Kind.normal) - - def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): - super().__init__(*args, **kwargs) - - _ = make_dir_tree(datetime.now().year, base_path=root_dir) - - self._root_dir = root_dir - self._verbose = verbose - - # Used for the emulated cameras only. - self._img_dir = None - - # Resource/datum docs related variables. 
- self._asset_docs_cache: deque[tuple[str, dict[str, Any]]] = deque() - self._stream_resource_document: dict[str, Any] | None = None - self._stream_datum_factory: Any | None = None - self._dataset: h5py.Dataset | None = None - - self.noise.put(noise) - self.limits = [[-0.6, 0.6], [-0.45, 0.45]] - if TEST: - self.mplFig = mpl.figure.Figure() - self.mplFig.subplots_adjust(left=0.15, bottom=0.15, top=0.92) - self.mplAx = self.mplFig.add_subplot(111) - - xv = np.random.rand(400, 300) - self.im = self.mplAx.imshow( - xv.T, - aspect="auto", - origin="lower", - vmin=0, - vmax=1e3, - cmap="jet", - extent=(self.limits[0][0], self.limits[0][1], self.limits[1][0], self.limits[1][1]), - ) - self.counter = 0 - self.beamLine = build_beamline() - - def trigger(self): - super().trigger() - - raw_image = self.generate_beam(noise=self.noise.get()) - - current_frame = next(self._counter) - - self._dataset.resize((current_frame + 1, *self.image_shape.get())) - - self._dataset[current_frame, :, :] = raw_image - - stream_datum_document = self._stream_datum_factory( - StreamRange(start=current_frame, stop=current_frame + 1), - ) - self._asset_docs_cache.append(("stream_datum", stream_datum_document)) - - stats = get_beam_stats(raw_image) - - for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: - getattr(self, attr).put(stats[attr]) - - super().trigger() - return NullStatus() - - def _generate_file_path(self, date_template="%Y/%m/%d"): - date = datetime.now() - assets_dir = date.strftime(date_template) - data_file = f"{new_uid()}.h5" - - return Path(self._root_dir) / Path(assets_dir) / Path(data_file) - - def stage(self): - super().stage() - - self._asset_docs_cache.clear() - full_path = self._generate_file_path() - image_shape = self.image_shape.get() - - uri = f"file://localhost/{str(full_path).strip('/')}" - - ( - self._stream_resource_document, - self._stream_datum_factory, - ) = compose_stream_resource( - mimetype="application/x-hdf5", - uri=uri, - 
data_key=self.image.name, - parameters={ - "chunk_shape": (1, *image_shape), - "dataset": "/entry/image", - }, - ) - - self._data_file = full_path - self._asset_docs_cache.append(("stream_resource", self._stream_resource_document)) - - self._h5file_desc = h5py.File(self._data_file, "x") - group = self._h5file_desc.create_group("/entry") - self._dataset = group.create_dataset( - "image", - data=np.full(fill_value=np.nan, shape=(1, *image_shape)), - maxshape=(None, *image_shape), - chunks=(1, *self.image_shape.get()), - dtype="float64", - compression="lzf", - ) - - self._counter = itertools.count() - - def unstage(self): - super().unstage() - # del self._dataset - self._h5file_desc.close() - self._stream_resource_document = None - self._stream_datum_factory = None - - def describe(self): - res = super().describe() - res[self.image.name] = { - "shape": [1, *self.image_shape.get()], - "external": "STREAM:", - "source": "sim", - "dtype": "array", - "dtype_numpy": np.dtype(np.float64).str, - } # self.parent.ssa_inboard.get() - mask &= X < self.parent.ssa_outboard.get() - mask &= Y > self.parent.ssa_lower.get() - mask &= Y < self.parent.ssa_upper.get() - mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) - - image = beam * mask - - if noise: - kx = np.fft.fftfreq(n=len(x), d=0.1) - ky = np.fft.fftfreq(n=len(y), d=0.1) - KX, KY = np.meshgrid(kx, ky) - - power_spectrum = 1 / (1e-2 + KX**2 + KY**2) - - white_noise = 1e-3 * np.random.standard_normal(size=X.shape) - pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) - # background = 5e-3 * (X - Y) / X.max() - - image += white_noise + pink_noise - return image - - def generate_beam_xrt(self, noise: bool = True): - R2 = self.parent.kbh_dsh.get() - R1 = self.parent.kbv_dsv.get() - - self.beamLine.toroidMirror01.R = R1 - self.beamLine.toroidMirror02.R = R2 - outDict = run_process(self.beamLine) - lb = outDict["screen01beamLocal01"] - - hist2d, _, _ = 
build_histRGB(lb, lb, limits=self.limits, isScreen=True, shape=[400, 300]) - image = hist2d - _ = np.max(image) - image += 1e-3 * np.abs(np.random.standard_normal(size=image.shape)) - self.counter += 1 - return image - - def generate_beam(self, *args, **kwargs): - return self.generate_beam_xrt(*args, **kwargs) - - -class TiledBeamlineEpics(Device): - det = Cpt(TiledxrtEpicsScreen, name="DetectorScreen") - - kbh_ush = Cpt(Signal, kind="hinted") - kbh_dsh = Cpt(EpicsSignal, ":TM_HOR:R", kind="hinted") - kbv_usv = Cpt(Signal, kind="hinted") - kbv_dsv = Cpt(EpicsSignal, ":TM_VERT:R", kind="hinted") - - ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") - ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") - ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") - ssa_upper = Cpt(Signal, value=5.0, kind="hinted") - - def __init__(self, *args, **kwargs): - self.beamline = build_beamline() - super().__init__(*args, **kwargs) - - -class TiledBeamline(Device): - det = Cpt(TiledDetector) - - kbh_ush = Cpt(Signal, kind="hinted") - kbh_dsh = Cpt(Signal, kind="hinted") - kbv_usv = Cpt(Signal, kind="hinted") - kbv_dsv = Cpt(Signal, kind="hinted") - - ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") - ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") - ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") - ssa_upper = Cpt(Signal, value=5.0, kind="hinted") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) diff --git a/src/blop/tests/unit/ax/__init__.py b/src/blop/tests/__init__.py similarity index 100% rename from src/blop/tests/unit/ax/__init__.py rename to src/blop/tests/__init__.py diff --git a/src/blop/tests/ax/__init__.py b/src/blop/tests/ax/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/blop/tests/unit/ax/test_agent.py b/src/blop/tests/ax/test_agent.py similarity index 89% rename from src/blop/tests/unit/ax/test_agent.py rename to src/blop/tests/ax/test_agent.py index 0580ca30..9f67f821 100644 --- 
a/src/blop/tests/unit/ax/test_agent.py +++ b/src/blop/tests/ax/test_agent.py @@ -135,6 +135,28 @@ def test_agent_suggest_multiple(mock_evaluation_function): assert isinstance(parameterizations[i]["test_movable2"], (int, float)) +def test_agent_suggest_fixed_dofs(mock_evaluation_function): + movable1 = MovableSignal(name="test_movable1") + movable2 = MovableSignal(name="test_movable2") + dof1 = RangeDOF(actuator=movable1, bounds=(0, 10), parameter_type="float") + dof2 = RangeDOF(actuator=movable2, bounds=(0, 10), parameter_type="float") + objective = Objective(name="test_objective", minimize=False) + agent = Agent( + sensors=[], + dofs=[dof1, dof2], + objectives=[objective], + evaluation_function=mock_evaluation_function, + ) + with pytest.raises(ValueError): + agent.fixed_dofs = {"test_movable1": 3, dof2: 4} + agent.fixed_dofs = {dof2: 4} + parameterizations = agent.suggest(5) + for i in range(5): + assert "test_movable2" in parameterizations[i] + if i != 0: # first trial will default to CenterOfSearchSpace and override any fixed parameters + assert parameterizations[i]["test_movable2"] == 4 + + def test_agent_ingest(mock_evaluation_function): movable1 = MovableSignal(name="test_movable1") movable2 = MovableSignal(name="test_movable2") diff --git a/src/blop/tests/unit/ax/test_dof.py b/src/blop/tests/ax/test_dof.py similarity index 100% rename from src/blop/tests/unit/ax/test_dof.py rename to src/blop/tests/ax/test_dof.py diff --git a/src/blop/tests/unit/ax/test_objective.py b/src/blop/tests/ax/test_objective.py similarity index 100% rename from src/blop/tests/unit/ax/test_objective.py rename to src/blop/tests/ax/test_objective.py diff --git a/src/blop/tests/unit/ax/test_optimizer.py b/src/blop/tests/ax/test_optimizer.py similarity index 89% rename from src/blop/tests/unit/ax/test_optimizer.py rename to src/blop/tests/ax/test_optimizer.py index 8ec67772..543c0feb 100644 --- a/src/blop/tests/unit/ax/test_optimizer.py +++ b/src/blop/tests/ax/test_optimizer.py @@ -24,6 
+24,23 @@ def test_ax_optimizer_init(): optimizer.ax_client.configure_experiment(parameters) +def test_ax_fixed_parameters(): + optimizer = AxOptimizer( + parameters=[ + RangeParameterConfig(name="x1", bounds=(-5.0, 5.0), parameter_type="float"), + RangeParameterConfig(name="x2", bounds=(-5.0, 5.0), parameter_type="float"), + ChoiceParameterConfig(name="x3", values=[0, 1, 2, 3, 4, 5], parameter_type="int", is_ordered=True), + ], + objective="y1,-y2", + parameter_constraints=["x1 + x2 <= 10"], + outcome_constraints=["y1 >= 0", "y2 <= 0"], + ) + optimizer.fixed_parameters = {"x3": 3} + assert optimizer.fixed_parameters == {"x3": 3} + with pytest.raises(KeyError): + optimizer.fixed_parameters = {"x4": 3} + + def test_ax_optimizer_suggest(): optimizer = AxOptimizer( parameters=[ diff --git a/src/blop/tests/unit/conftest.py b/src/blop/tests/conftest.py similarity index 100% rename from src/blop/tests/unit/conftest.py rename to src/blop/tests/conftest.py diff --git a/src/blop/tests/integration/conftest.py b/src/blop/tests/integration/conftest.py deleted file mode 100644 index 007e897d..00000000 --- a/src/blop/tests/integration/conftest.py +++ /dev/null @@ -1,37 +0,0 @@ -import asyncio -import logging - -import pytest -from bluesky.callbacks.tiled_writer import TiledWriter -from bluesky.run_engine import RunEngine -from tiled.client import from_uri -from tiled.server.simple import SimpleTiledServer - -logger = logging.getLogger("blop") -logger.setLevel(logging.DEBUG) -logging.getLogger("httpx").setLevel(logging.WARNING) - - -@pytest.fixture(scope="function") -def setup(): - """Returns the tiled client as the default backend for all tests.""" - server = SimpleTiledServer(readable_storage=["/tmp/blop/sim"]) - client = from_uri(server.uri) - yield client - server.close() - - -@pytest.fixture(scope="function") -def db_callback(setup): - """Returns the TiledWriter callback for the default tiled backend.""" - return TiledWriter(setup) - - -@pytest.fixture(scope="function") -def 
RE(db_callback): - """Sets up the RunEngine with the correct callback.""" - loop = asyncio.new_event_loop() - loop.set_debug(True) - RE = RunEngine({}, loop=loop) - RE.subscribe(db_callback) - return RE diff --git a/src/blop/tests/integration/test_ax_agent.py b/src/blop/tests/integration/test_ax_agent.py deleted file mode 100644 index d0db6345..00000000 --- a/src/blop/tests/integration/test_ax_agent.py +++ /dev/null @@ -1,51 +0,0 @@ -from blop.ax.agent import Agent -from blop.ax.dof import RangeDOF -from blop.ax.objective import Objective -from blop.sim.beamline import TiledBeamline - - -def test_ax_agent_sim_beamline(RE, setup): - beamline = TiledBeamline(name="bl") - beamline.det.noise.put(False) - - dofs = [ - RangeDOF(actuator=beamline.kbv_dsv, bounds=(-5.0, 5.0), parameter_type="float"), - RangeDOF(actuator=beamline.kbv_usv, bounds=(-5.0, 5.0), parameter_type="float"), - RangeDOF(actuator=beamline.kbh_dsh, bounds=(-5.0, 5.0), parameter_type="float"), - RangeDOF(actuator=beamline.kbh_ush, bounds=(-5.0, 5.0), parameter_type="float"), - ] - - objectives = [ - Objective(name="bl_det_sum", minimize=False), - Objective(name="bl_det_wid_x", minimize=True), - Objective(name="bl_det_wid_y", minimize=True), - ] - - def evaluation_function(uid: str, suggestions: list[dict]) -> list[dict]: - run = setup[uid] - - bl_det_sums = run["primary/bl_det_sum"].read() - bl_det_wid_x = run["primary/bl_det_wid_x"].read() - bl_det_wid_y = run["primary/bl_det_wid_y"].read() - - trial_ids = [suggestion["_id"] for suggestion in run.metadata["start"]["blop_suggestions"]] - outcomes = [] - for suggestion in suggestions: - idx = trial_ids.index(suggestion["_id"]) - outcome = { - "_id": suggestion["_id"], - "bl_det_sum": bl_det_sums[idx], - "bl_det_wid_x": bl_det_wid_x[idx], - "bl_det_wid_y": bl_det_wid_y[idx], - } - outcomes.append(outcome) - - return outcomes - - agent = Agent( - sensors=[beamline.det], - dofs=dofs, - objectives=objectives, - evaluation_function=evaluation_function, - ) - 
RE(agent.optimize(iterations=12, n_points=1)) diff --git a/src/blop/tests/unit/test_plans.py b/src/blop/tests/test_plans.py similarity index 93% rename from src/blop/tests/unit/test_plans.py rename to src/blop/tests/test_plans.py index 4346422e..04d0b084 100644 --- a/src/blop/tests/unit/test_plans.py +++ b/src/blop/tests/test_plans.py @@ -1,11 +1,11 @@ -from unittest.mock import MagicMock, Mock, patch +from unittest.mock import MagicMock, patch import bluesky.plan_stubs as bps import pytest from bluesky.run_engine import RunEngine from bluesky.utils import plan -from blop.plans import acquire_baseline, acquire_with_background, default_acquire, optimize, optimize_step +from blop.plans import acquire_baseline, default_acquire, optimize, optimize_step from blop.protocols import AcquisitionPlan, Checkpointable, EvaluationFunction, OptimizationProblem, Optimizer from .conftest import MovableSignal, ReadableSignal @@ -394,39 +394,6 @@ def test_default_acquire_multiple_movables_readables(RE): assert movable2.read()["x2"]["value"] == 0.1 -def test_acquire_with_background(RE): - """Test background acquisition with multiple movables, positions, and readables""" - - def block_beam(): - yield from bps.null() - - def unblock_beam(): - yield from bps.null() - - movable = MovableSignal("x1", initial_value=-1.0) - readable = ReadableSignal("objective") - - mock_block_beam = Mock(wraps=block_beam) - mock_unblock_beam = Mock(wraps=unblock_beam) - - with patch.object(readable, "read", wraps=readable.read) as mock_read: - RE( - acquire_with_background( - [{"x1": 0.0, "_id": 0}], - [movable], - sensors=[readable], - block_beam=mock_block_beam, - unblock_beam=mock_unblock_beam, - ) - ) - # Two reads, one blocked, one unblocked - assert mock_read.call_count == 2 - assert mock_block_beam.call_count == 1 - assert mock_unblock_beam.call_count == 1 - - assert movable.read()["x1"]["value"] == 0.0 - - def test_acquire_baseline(RE): """Test acquiring a baseline reading from suggested 
parameterizations.""" optimizer = MagicMock(spec=Optimizer) diff --git a/src/blop/tests/unit/test_utils.py b/src/blop/tests/test_utils.py similarity index 97% rename from src/blop/tests/unit/test_utils.py rename to src/blop/tests/test_utils.py index 1edfaa99..8a237928 100644 --- a/src/blop/tests/unit/test_utils.py +++ b/src/blop/tests/test_utils.py @@ -1,7 +1,7 @@ import numpy as np -from blop.plans.utils import InferredReadable, get_route_index, route_suggestions from blop.protocols import ID_KEY +from blop.utils import InferredReadable, get_route_index, route_suggestions # InferredReadable tests diff --git a/src/blop/plans/utils.py b/src/blop/utils.py similarity index 79% rename from src/blop/plans/utils.py rename to src/blop/utils.py index 05a95aef..3019bb40 100644 --- a/src/blop/plans/utils.py +++ b/src/blop/utils.py @@ -8,7 +8,7 @@ from event_model import DataKey from numpy.typing import ArrayLike -from ..protocols import ID_KEY +from .protocols import ID_KEY, OptimizationProblem def _infer_data_key(value: ArrayLike) -> DataKey: @@ -133,3 +133,24 @@ def route_suggestions(suggestions: list[dict], starting_position: dict | None = starting_point = np.array([starting_position[dim] for dim in dims_to_route]) if starting_position else None return [suggestions[i] for i in get_route_index(points=points, starting_point=starting_point)] + + +def collect_optimization_metadata(optimization_problem: OptimizationProblem) -> dict[str, Any]: + """ + Collect the metadata for the optimization problem. 
+ """ + if hasattr(optimization_problem.evaluation_function, "__name__"): + evaluation_function_name = optimization_problem.evaluation_function.__name__ # type: ignore[attr-defined] + else: + evaluation_function_name = optimization_problem.evaluation_function.__class__.__name__ + if hasattr(optimization_problem.acquisition_plan, "__name__"): + acquisition_plan_name = optimization_problem.acquisition_plan.__name__ # type: ignore[attr-defined] + else: + acquisition_plan_name = optimization_problem.acquisition_plan.__class__.__name__ + return { + "evaluation_function": evaluation_function_name, + "acquisition_plan": acquisition_plan_name, + "optimizer": optimization_problem.optimizer.__class__.__name__, + "sensors": [sensor.name for sensor in optimization_problem.sensors], + "actuators": [actuator.name for actuator in optimization_problem.actuators], + }