Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# Git
.git
.gitignore

# Python
__pycache__
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual Environment
.venv
venv/
ENV/
env/

# Tool specific
.vscode/
.idea/
.mypy_cache/
.ruff_cache/
.pytest_cache/
.coverage
htmlcov/

# Local configuration
.env
.agentc/
7 changes: 7 additions & 0 deletions AGENTS.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
```
- **Run Agent C (Textual UI)**: `uv run agent-c`
- **Run Console UI**: `uv run run-console`
- **Run in Docker**: `docker compose up --build`
- **Test all**: `uv run pytest`
- **Test agentc core**: `uv run pytest tests/core/ tests/middleware/ tests/adapters/`
- **Type check**: `uv run mypy`
Expand All @@ -31,6 +32,12 @@ uv sync
uv run agent-c # Launch the Textual UI
```

## Docker Quickstart

```powershell
docker compose up --build
```

## Architecture Overview

> For visual architecture diagrams, see the [Architecture section in README.md](README.md#architecture).
Expand Down
1 change: 1 addition & 0 deletions ARCHITECTURE.md
Original file line number Diff line number Diff line change
Expand Up @@ -203,6 +203,7 @@ Entries from earlier locations override those with the same name later.
[backends.ollama]
provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
base_url_env = "OLLAMA_HOST"
base_url = "http://localhost:11434/v1"

[models.ollama-gpt-oss-120b]
Expand Down
33 changes: 33 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
FROM python:3.13-slim

# Install uv by copying the static binary from the official image
COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv

# git is included for version info if the tool shells out to git at runtime.
# --no-install-recommends keeps the layer small by skipping recommended packages.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Compile .pyc files at install time for faster container start-up
ENV UV_COMPILE_BYTECODE=1

# Copy only the dependency manifests first so the dependency-install layer
# stays cached until pyproject.toml / uv.lock actually change
COPY pyproject.toml uv.lock ./

# Install third-party dependencies only (not the project itself)
RUN uv sync --frozen --no-install-project --no-dev

# Copy the full project source
COPY . .

# Install the project on top of the cached dependency layer
RUN uv sync --frozen --no-dev

# Put the virtual environment's executables on PATH
ENV PATH="/app/.venv/bin:$PATH"

# Set the working directory for the user's workspace (mounted by compose)
WORKDIR /workspace

# Default entrypoint: launch the Textual UI
ENTRYPOINT ["agent-c"]
12 changes: 12 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,16 @@ Type commands or questions. Exit with "/quit" or "/exit".

For other providers (OpenAI, Anthropic), see **Configuration** below.

### 4. Run in Docker (optional)

```bash
docker compose up --build
```

This runs the Textual UI inside the container. The compose file sets `OLLAMA_HOST` to
`http://host.docker.internal:11434/v1` so the container can reach an Ollama server
running on the host machine. Override this variable if your Ollama server runs elsewhere.

## Installation

### For Development
Expand Down Expand Up @@ -98,6 +108,7 @@ Create `.agentc/providers.toml` in your project or `~/.agentc/providers.toml` in
[backends.my-custom-ollama]
provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
base_url_env = "OLLAMA_HOST"
base_url = "http://localhost:11434/v1"

[backends.openai]
Expand All @@ -119,6 +130,7 @@ model_name = "gpt-4o"
- `model_cls`: Full Python path to the model class
- `model_name`: Model identifier (e.g., `gpt-4o`, `deepseek-r1:32b`)
- `api_key_env`: (Optional) Environment variable name for API key
- `base_url_env`: (Optional) Name of an environment variable that, when set, overrides `base_url`
- `base_url`: (Optional) Custom base URL for the backend
- `params`: (Optional) Keyword arguments forwarded to the model constructor (e.g., `temperature`)

Expand Down
22 changes: 22 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
services:
  agent-c:
    build: .
    image: agent-c:latest
    container_name: agent-c
    # Mount the current directory to /workspace so Agent C can edit files here;
    # /workspace is also the runtime WORKDIR set in the Dockerfile
    volumes:
      - .:/workspace
    # Optional: persist agent memory/history outside the workspace if needed
    # (normally the agent writes its state to files inside the mounted workspace)
    # - agent-data:/root/.local/share/agentc
    environment:
      # Pass through API keys from the host environment (no value = inherit)
      - ANTHROPIC_API_KEY
      - OPENAI_API_KEY
      # Point the ollama backend at the host machine's Ollama server;
      # override this variable if Ollama runs elsewhere
      - OLLAMA_HOST=http://host.docker.internal:11434/v1

    # Map host.docker.internal to the host gateway so it resolves on Linux too
    # (Docker Desktop on macOS/Windows provides it automatically)
    extra_hosts:
      - "host.docker.internal:host-gateway"
    # Enable TTY and stdin so the interactive Textual UI can run
    stdin_open: true
    tty: true
13 changes: 12 additions & 1 deletion src/agentc/core/backends/pydantic_ai/provider_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ def _load_single_file(path: Path) -> tuple[dict[str, BackendConfig], dict[str, M
provider_cls_path=config["provider_cls"],
model_cls_path=config["model_cls"],
api_key_env=config.get("api_key_env"),
base_url_env=config.get("base_url_env"),
base_url=config.get("base_url"),
)

Expand All @@ -56,6 +57,7 @@ def _load_single_file(path: Path) -> tuple[dict[str, BackendConfig], dict[str, M
backend=config["backend"],
model_name=config["model_name"],
api_key_env=config.get("api_key_env"),
base_url_env=config.get("base_url_env"),
base_url=config.get("base_url"),
params=params,
)
Expand Down Expand Up @@ -135,7 +137,16 @@ def build_model(
model_cls = get_class(backend_config.model_cls_path)

api_key_env = model_config.api_key_env or backend_config.api_key_env
base_url = model_config.base_url or backend_config.base_url
base_url_env = model_config.base_url_env or backend_config.base_url_env
base_url_from_env: str | None = None
if base_url_env:
base_url_from_env = os.getenv(base_url_env)

base_url: str | None
if base_url_from_env is not None:
base_url = base_url_from_env
else:
base_url = model_config.base_url or backend_config.base_url

provider_kwargs: dict[str, Any] = {}
if api_key_env:
Expand Down
2 changes: 2 additions & 0 deletions src/agentc/core/config_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ class BackendConfig:
provider_cls_path: str # e.g., "pydantic_ai.providers.anthropic.AnthropicProvider"
model_cls_path: str # e.g., "pydantic_ai.models.anthropic.AnthropicModel"
api_key_env: str | None = None
base_url_env: str | None = None
base_url: str | None = None


Expand All @@ -29,6 +30,7 @@ class ModelConfig:
backend: str
model_name: str
api_key_env: str | None = None
base_url_env: str | None = None
base_url: str | None = None
params: dict[str, Any] = field(default_factory=dict)

Expand Down
1 change: 1 addition & 0 deletions src/agentc/providers.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ api_key_env = "MISTRAL_API_KEY"
[backends.ollama]
provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
base_url_env = "OLLAMA_HOST"
base_url = "http://localhost:11434/v1"

[backends.openai]
Expand Down
8 changes: 7 additions & 1 deletion tests/core/test_provider_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
base_url = "http://low"
base_url_env = "LOW_BASE_URL"

[backends.other]
provider_cls = "OtherProvider"
Expand All @@ -72,6 +73,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
provider_cls = "pydantic_ai.providers.ollama.OllamaProvider"
model_cls = "pydantic_ai.models.openai.OpenAIChatModel"
base_url = "http://high"
base_url_env = "HIGH_BASE_URL"

[models.local]
backend = "ollama"
Expand All @@ -84,6 +86,7 @@ def test_load_providers_merges_with_priority(tmp_path: Path) -> None:
backends, models = load_providers([high_dir, low_dir])

assert backends["ollama"].base_url == "http://high"
assert backends["ollama"].base_url_env == "HIGH_BASE_URL"
assert models["local"].model_name == "ollama-high"
assert models["local"].params["temperature"] == 0.1

Expand All @@ -100,13 +103,15 @@ def test_build_model_prefers_model_overrides(monkeypatch: pytest.MonkeyPatch) ->
provider_cls_path="provider.Path",
model_cls_path="model.Path",
api_key_env="BACKEND_KEY",
base_url_env="BACKEND_URL",
base_url="http://backend",
)
model = ModelConfig(
name="preset",
backend="backend",
model_name="model-str",
api_key_env="MODEL_KEY",
base_url_env="MODEL_URL",
base_url="http://model",
params={"temperature": 0.3},
)
Expand All @@ -123,13 +128,14 @@ def get_class_side_effect(path: str) -> MagicMock:

monkeypatch.setenv("MODEL_KEY", "model-secret")
monkeypatch.setenv("BACKEND_KEY", "backend-secret")
monkeypatch.setenv("MODEL_URL", "http://env-model")

with patch("agentc.core.backends.pydantic_ai.provider_loader._get_class", side_effect=get_class_side_effect):
provider, _ = build_model(model, backend)

mock_provider_cls.assert_called_once_with(
api_key="model-secret",
base_url="http://model",
base_url="http://env-model",
)
mock_model_cls.assert_called_once()
kwargs = mock_model_cls.call_args.kwargs
Expand Down
Loading