From 089d75caffaf1ba31b6ae4dfa5e78f79f0c11996 Mon Sep 17 00:00:00 2001
From: R Hyde
Date: Sat, 14 Feb 2026 11:52:26 +0000
Subject: [PATCH 1/4] feat/docker: Initial attempt at working with Docker.

- I had to change providers.toml to get this to work. Not ideal.
---
 .dockerignore             | 46 +++++++++++++++++++++++++++++++++++++++
 Dockerfile                | 33 ++++++++++++++++++++++++++++
 docker-compose.yml        | 21 ++++++++++++++++++
 src/agentc/providers.toml |  2 +-
 4 files changed, 101 insertions(+), 1 deletion(-)
 create mode 100644 .dockerignore
 create mode 100644 Dockerfile
 create mode 100644 docker-compose.yml

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..b3a8191
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,46 @@
+# Git
+.git
+.gitignore
+
+# Python
+__pycache__
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Virtual Environment
+.venv
+venv/
+ENV/
+env/
+
+# Tool specific
+.vscode/
+.idea/
+.mypy_cache/
+.ruff_cache/
+.pytest_cache/
+.coverage
+htmlcov/
+
+# Local configuration
+.env
+.agentc/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..0c0d88c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,33 @@
+FROM python:3.13-slim
+
+# Install uv
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
+
+# Install git so version info is available; not strictly required at runtime unless the tool shells out to git
+RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+
+# Enable bytecode compilation
+ENV UV_COMPILE_BYTECODE=1
+
+# Copy dependency definitions first so the dependency install caches as a layer
+COPY pyproject.toml uv.lock ./
+
+# Install dependencies
+RUN uv sync --frozen --no-install-project --no-dev
+
+# Copy the project
+COPY . .
+
+# Install the project
+RUN uv sync --frozen --no-dev
+
+# Put the virtual environment on the PATH
+ENV PATH="/app/.venv/bin:$PATH"
+
+# Set the working directory for the user's workspace
+WORKDIR /workspace
+
+# Default entrypoint
+ENTRYPOINT ["agent-c"]
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..50a8794
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,21 @@
+services:
+  agent-c:
+    build: .
+    image: agent-c:latest
+    container_name: agent-c
+    # Mount the current directory to /workspace so Agent C can edit files here
+    volumes:
+      - .:/workspace
+    # Optional: persist agent memory/history; the agent usually writes to files in the workspace
+    #   - agent-data:/root/.local/share/agentc
+    environment:
+      # Pass through API keys
+      - ANTHROPIC_API_KEY
+      - OPENAI_API_KEY
+
+    # Allow accessing the host machine via host.docker.internal on Linux too
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # Enable TTY and stdin for the Textual UI
+    stdin_open: true
+    tty: true
diff --git a/src/agentc/providers.toml b/src/agentc/providers.toml
index e31bb3c..8ee0cbc 100644
--- a/src/agentc/providers.toml
+++ b/src/agentc/providers.toml
@@ -22,7 +22,7 @@ api_key_env = "MISTRAL_API_KEY"
 [backends.ollama]
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
-base_url = "http://localhost:11434/v1"
+base_url = "http://host.docker.internal:11434/v1"
 
 [backends.openai]
 provider_cls = "pydantic_ai.providers.openai.OpenAIProvider"

From 8e9bdf3524789b1703fd647b359a9d2330810eac Mon Sep 17 00:00:00 2001
From: R Hyde
Date: Sat, 14 Feb 2026 12:40:13 +0000
Subject: [PATCH 2/4] feat/docker: Make the Ollama base URL work both inside
 and outside Docker.

Resolve the base URL from an environment variable (base_url_env) when it
is set, falling back to the static base_url otherwise, so the stock
config keeps working on the host while docker-compose points the
container at host.docker.internal.
---
 docker-compose.yml                                 |  1 +
 .../core/backends/pydantic_ai/provider_loader.py   | 12 +++++++++++-
 src/agentc/core/config_types.py                    |  2 ++
 src/agentc/providers.toml                          |  3 ++-
 tests/core/test_provider_loader.py                 |  8 +++++++-
 5 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 50a8794..213ca30 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -12,6 +12,7 @@ services:
       # Pass through API keys
       - ANTHROPIC_API_KEY
       - OPENAI_API_KEY
+      - OLLAMA_HOST=http://host.docker.internal:11434/v1
 
     # Allow accessing the host machine via host.docker.internal on Linux too
     extra_hosts:
diff --git a/src/agentc/core/backends/pydantic_ai/provider_loader.py b/src/agentc/core/backends/pydantic_ai/provider_loader.py
index f1bfb85..9961683 100644
--- a/src/agentc/core/backends/pydantic_ai/provider_loader.py
+++ b/src/agentc/core/backends/pydantic_ai/provider_loader.py
@@ -40,6 +40,7 @@ def _load_single_file(path: Path) -> tuple[dict[str, BackendConfig], dict[str, M
             provider_cls_path=config["provider_cls"],
             model_cls_path=config["model_cls"],
             api_key_env=config.get("api_key_env"),
+            base_url_env=config.get("base_url_env"),
             base_url=config.get("base_url"),
         )
 
@@ -56,6 +57,7 @@
             backend=config["backend"],
             model_name=config["model_name"],
             api_key_env=config.get("api_key_env"),
+            base_url_env=config.get("base_url_env"),
             base_url=config.get("base_url"),
             params=params,
         )
@@ -135,7 +137,15 @@ def build_model(
     model_cls = get_class(backend_config.model_cls_path)
 
     api_key_env = model_config.api_key_env or backend_config.api_key_env
-    base_url = model_config.base_url or backend_config.base_url
+    base_url_env = model_config.base_url_env or backend_config.base_url_env
+    base_url_from_env: str | None = None
+    if base_url_env:
+        base_url_from_env = os.getenv(base_url_env)
+
+    if base_url_from_env is not None:
+        base_url = base_url_from_env
+    else:
+        base_url = model_config.base_url or backend_config.base_url
 
     provider_kwargs: dict[str, Any] = {}
     if api_key_env:
diff --git a/src/agentc/core/config_types.py b/src/agentc/core/config_types.py
index 02c7eec..40e07d4 100644
--- a/src/agentc/core/config_types.py
+++ b/src/agentc/core/config_types.py
@@ -18,6 +18,7 @@ class BackendConfig:
     provider_cls_path: str  # e.g., "pydantic_ai.providers.anthropic.AnthropicProvider"
     model_cls_path: str  # e.g., "pydantic_ai.models.anthropic.AnthropicModel"
     api_key_env: str | None = None
+    base_url_env: str | None = None
     base_url: str | None = None
 
 
@@ -29,6 +30,7 @@ class ModelConfig:
     backend: str
     model_name: str
     api_key_env: str | None = None
+    base_url_env: str | None = None
     base_url: str | None = None
     params: dict[str, Any] = field(default_factory=dict)
 
diff --git a/src/agentc/providers.toml b/src/agentc/providers.toml
index 8ee0cbc..3b70300 100644
--- a/src/agentc/providers.toml
+++ b/src/agentc/providers.toml
@@ -22,7 +22,8 @@ api_key_env = "MISTRAL_API_KEY"
 [backends.ollama]
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
-base_url = "http://host.docker.internal:11434/v1"
+base_url_env = "OLLAMA_HOST"
+base_url = "http://localhost:11434/v1"
 
 [backends.openai]
 provider_cls = "pydantic_ai.providers.openai.OpenAIProvider"
diff --git a/tests/core/test_provider_loader.py b/tests/core/test_provider_loader.py
index ddd9b0b..6c2cfcf 100644
--- a/tests/core/test_provider_loader.py
+++ b/tests/core/test_provider_loader.py
@@ -49,6 +49,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
 base_url = "http://low"
+base_url_env = "LOW_BASE_URL"
 
 [backends.other]
 provider_cls = "OtherProvider"
@@ -72,6 +73,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
 base_url = "http://high"
+base_url_env = "HIGH_BASE_URL"
 
 [models.local]
 backend = "ollama"
@@ -84,6 +86,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
     backends, models = load_providers([high_dir, low_dir])
 
     assert backends["ollama"].base_url == "http://high"
+    assert backends["ollama"].base_url_env == "HIGH_BASE_URL"
     assert models["local"].model_name == "ollama-high"
     assert models["local"].params["temperature"] == 0.1
 
@@ -100,6 +103,7 @@ def test_build_model_prefers_model_overrides(monkeypatch: pytest.MonkeyPatch) ->
         provider_cls_path="provider.Path",
         model_cls_path="model.Path",
         api_key_env="BACKEND_KEY",
+        base_url_env="BACKEND_URL",
         base_url="http://backend",
     )
     model = ModelConfig(
@@ -107,6 +111,7 @@ def test_build_model_prefers_model_overrides(monkeypatch: pytest.MonkeyPatch) ->
         backend="backend",
         model_name="model-str",
         api_key_env="MODEL_KEY",
+        base_url_env="MODEL_URL",
         base_url="http://model",
         params={"temperature": 0.3},
     )
@@ -123,13 +128,14 @@ def get_class_side_effect(path: str) -> MagicMock:
 
     monkeypatch.setenv("MODEL_KEY", "model-secret")
     monkeypatch.setenv("BACKEND_KEY", "backend-secret")
+    monkeypatch.setenv("MODEL_URL", "http://env-model")
 
     with patch("agentc.core.backends.pydantic_ai.provider_loader._get_class", side_effect=get_class_side_effect):
         provider, _ = build_model(model, backend)
 
     mock_provider_cls.assert_called_once_with(
         api_key="model-secret",
-        base_url="http://model",
+        base_url="http://env-model",
     )
     mock_model_cls.assert_called_once()
     kwargs = mock_model_cls.call_args.kwargs

From a9cccc24bcdf4d855c31d98a3b386cfa226e7683 Mon Sep 17 00:00:00 2001
From: R Hyde
Date: Sat, 14 Feb 2026 12:42:34 +0000
Subject: [PATCH 3/4] feat/docker: Update docs.
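
Document the Docker quickstart in README.md and AGENTS.md, and document
the new base_url_env key alongside base_url in the providers.toml
examples.

The documented lookup order follows build_model in provider_loader.py.
A minimal sketch, assuming config objects carrying the four attributes
used there (resolve_base_url itself is illustrative, not a function in
the codebase):

    import os

    def resolve_base_url(model_cfg, backend_cfg):
        # Model-level settings take precedence over backend-level ones,
        # and an env var named by base_url_env, when set, overrides the
        # static base_url fallback.
        env_name = model_cfg.base_url_env or backend_cfg.base_url_env
        from_env = os.getenv(env_name) if env_name else None
        if from_env is not None:
            return from_env
        return model_cfg.base_url or backend_cfg.base_url

With the shipped defaults, an unset OLLAMA_HOST yields
http://localhost:11434/v1, while the compose file exports the
host.docker.internal override inside the container.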
---
 AGENTS.md       |  7 +++++++
 ARCHITECTURE.md |  1 +
 README.md       | 12 ++++++++++++
 3 files changed, 20 insertions(+)

diff --git a/AGENTS.md b/AGENTS.md
index 34d71c6..c29f27d 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -18,6 +18,7 @@
 ```
 - **Run Agent C (Textual UI)**: `uv run agent-c`
 - **Run Console UI**: `uv run run-console`
+- **Run in Docker**: `docker compose up --build`
 - **Test all**: `uv run pytest`
 - **Test agentc core**: `uv run pytest tests/core/ tests/middleware/ tests/adapters/`
 - **Type check**: `uv run mypy`
@@ -31,6 +32,12 @@ uv sync
 uv run agent-c # Launch the Textual UI
 ```
 
+## Docker Quickstart
+
+```powershell
+docker compose up --build
+```
+
 ## Architecture Overview
 
 > For visual architecture diagrams, see the [Architecture section in README.md](README.md#architecture).
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
index a47f6bc..82b3019 100644
--- a/ARCHITECTURE.md
+++ b/ARCHITECTURE.md
@@ -203,6 +203,7 @@ Entries from earlier locations override those with the same name later.
 [backends.ollama]
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
+base_url_env = "OLLAMA_HOST"
 base_url = "http://localhost:11434/v1"
 
 [models.ollama-gpt-oss-120b]
diff --git a/README.md b/README.md
index 1b7efe2..dbebf9e 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,16 @@ Type commands or questions. Exit with "/quit" or "/exit".
 
 For other providers (OpenAI, Anthropic), see **Configuration** below.
 
+### 4. Run in Docker (optional)
+
+```bash
+docker compose up --build
+```
+
+This runs the Textual UI inside the container. The compose file sets `OLLAMA_HOST` to
+`http://host.docker.internal:11434/v1` so the container can reach an Ollama server
+running on the host. Override it if your Ollama server lives elsewhere.
+
 ## Installation
 
 ### For Development
@@ -98,6 +108,7 @@ Create `.agentc/providers.toml` in your project or `~/.agentc/providers.toml` in
 [backends.my-custom-ollama]
 provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
 model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
+base_url_env = "OLLAMA_HOST"
 base_url = "http://localhost:11434/v1"
 
 [backends.openai]
@@ -119,6 +130,7 @@ model_name = "gpt-4o"
 - `model_cls`: Full Python path to the model class
 - `model_name`: Model identifier (e.g., `gpt-4o`, `deepseek-r1:32b`)
 - `api_key_env`: (Optional) Environment variable name for API key
+- `base_url_env`: (Optional) Environment variable whose value overrides `base_url` when set
 - `base_url`: (Optional) Custom base URL for the backend
 - `params`: (Optional) Keyword arguments forwarded to the model constructor (e.g., `temperature`)
 

From 47d9dcf65cd84266883075ead0146e7d98a9db3c Mon Sep 17 00:00:00 2001
From: R Hyde
Date: Sat, 14 Feb 2026 12:46:03 +0000
Subject: [PATCH 4/4] feat/docker: Fix tests and lints.
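
mypy infers base_url as str from the first branch of build_model and
then rejects the str | None assignment in the else branch, so declare
it as str | None up front.

The copilot session factory tests pass dict literals where the client
expects a CopilotSessionConfig, so cast them. A minimal sketch of the
pattern applied throughout the test module, with values as in the
tests:

    from typing import cast

    from copilot.types import SessionConfig as CopilotSessionConfig

    base_config = cast(CopilotSessionConfig, {
        "model": "gpt-4",
        "skill_directories": [],
        "streaming": True,
        "system_message": "Test",
        "on_permission_request": None,
    })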
---
 .../backends/pydantic_ai/provider_loader.py |  1 +
 .../test_session_factory_github_copilot.py  | 43 ++++++++++---------
 2 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/src/agentc/core/backends/pydantic_ai/provider_loader.py b/src/agentc/core/backends/pydantic_ai/provider_loader.py
index 9961683..eb04b83 100644
--- a/src/agentc/core/backends/pydantic_ai/provider_loader.py
+++ b/src/agentc/core/backends/pydantic_ai/provider_loader.py
@@ -142,6 +142,7 @@ def build_model(
     if base_url_env:
         base_url_from_env = os.getenv(base_url_env)
 
+    base_url: str | None
     if base_url_from_env is not None:
         base_url = base_url_from_env
     else:
diff --git a/tests/core/test_session_factory_github_copilot.py b/tests/core/test_session_factory_github_copilot.py
index 2272448..5d4ba00 100644
--- a/tests/core/test_session_factory_github_copilot.py
+++ b/tests/core/test_session_factory_github_copilot.py
@@ -1,10 +1,13 @@
 """Tests for GhCopilotSessionFactory."""
 
 from pathlib import Path
+from typing import cast
 from unittest.mock import MagicMock, AsyncMock
 
 import pytest
 
+from copilot.types import SessionConfig as CopilotSessionConfig
+
 from agentc.core.backends.github_copilot.loop import GhAgentSession
 from agentc.core.backends.github_copilot.session_factory import GhCopilotSessionFactory
 from agentc.core.command_types import SessionConfig
@@ -20,13 +23,13 @@ async def test_gh_session_factory_creates_session() -> None:
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
     # Create base config
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": ["/default/skills"],
         "streaming": True,
         "system_message": "You are a helpful assistant",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(
@@ -49,13 +52,13 @@ async def test_gh_session_factory_with_model_override() -> None:
     mock_copilot_session.destroy = AsyncMock()
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": ["/default/skills"],
         "streaming": True,
         "system_message": "You are a helpful assistant",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(
@@ -81,13 +84,13 @@ async def test_gh_session_factory_with_skill_dirs_override() -> None:
     mock_copilot_session.destroy = AsyncMock()
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": ["/default/skills"],
         "streaming": True,
         "system_message": "You are a helpful assistant",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     custom_skill_dirs = [Path("/custom/skills"), Path("/another/skills")]
@@ -115,13 +118,13 @@ async def test_gh_session_factory_uses_base_config_defaults() -> None:
     mock_copilot_session.destroy = AsyncMock()
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4-turbo",
         "skill_directories": ["/base/skills"],
         "streaming": True,
         "system_message": "Base system message",
         "on_permission_request": MagicMock(),
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(
@@ -157,13 +160,13 @@ async def test_gh_session_factory_destroys_old_session() -> None:
     mock_client.create_session = AsyncMock(side_effect=[first_session, second_session])
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": [],
         "streaming": True,
         "system_message": "Test",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(model_name=None, clear_history=True)
 
@@ -193,13 +196,13 @@ async def test_gh_session_factory_handles_destroy_error() -> None:
     mock_client.create_session = AsyncMock(side_effect=[first_session, second_session])
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": [],
         "streaming": True,
         "system_message": "Test",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(model_name=None, clear_history=True)
 
@@ -221,13 +224,13 @@ async def test_gh_session_factory_cleanup() -> None:
     mock_copilot_session.destroy = AsyncMock()
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": [],
         "streaming": True,
         "system_message": "Test",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(model_name=None, clear_history=True)
@@ -245,13 +248,13 @@ async def test_gh_session_factory_cleanup_without_session() -> None:
     """Test that cleanup works when no session exists."""
     mock_client = MagicMock()
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": [],
         "streaming": True,
         "system_message": "Test",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
 
@@ -267,13 +270,13 @@ async def test_gh_session_factory_cleanup_handles_error() -> None:
     mock_copilot_session.destroy = AsyncMock(side_effect=Exception("Cleanup failed"))
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": [],
         "streaming": True,
         "system_message": "Test",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(model_name=None, clear_history=True)
@@ -294,13 +297,13 @@ async def test_gh_session_factory_registers_user_input_handler() -> None:
     mock_copilot_session.destroy = AsyncMock()
     mock_client.create_session = AsyncMock(return_value=mock_copilot_session)
 
-    base_config = {
+    base_config = cast(CopilotSessionConfig, {
         "model": "gpt-4",
         "skill_directories": ["/default/skills"],
         "streaming": True,
         "system_message": "You are a helpful assistant",
         "on_permission_request": None,
-    }
+    })
 
     factory = GhCopilotSessionFactory(mock_client, base_config)
     config = SessionConfig(model_name=None, clear_history=True)