diff --git a/backend/web/core/lifespan.py b/backend/web/core/lifespan.py
index 0778afe61..5da8971d8 100644
--- a/backend/web/core/lifespan.py
+++ b/backend/web/core/lifespan.py
@@ -273,3 +273,11 @@ async def _wechat_deliver(conn, msg):
         agent.close()
     except Exception as e:
         print(f"[web] Agent cleanup error: {e}")
+
+    # Cleanup: stop LSP language servers (best-effort — the LSP stack is
+    # optional, mirroring the guarded LSPService init in LeonAgent)
+    try:
+        from core.tools.lsp.service import lsp_pool
+        await lsp_pool.close_all()
+    except Exception as e:
+        print(f"[web] LSP cleanup error: {e}")
diff --git a/config/defaults/tool_catalog.py b/config/defaults/tool_catalog.py
index 294293874..c76409286 100644
--- a/config/defaults/tool_catalog.py
+++ b/config/defaults/tool_catalog.py
@@ -72,6 +72,7 @@ class ToolDef(BaseModel):
ToolDef(name="load_skill", desc="加载 Skill", group=ToolGroup.SKILLS),
# system
ToolDef(name="tool_search", desc="搜索可用工具", group=ToolGroup.SYSTEM),
+ ToolDef(name="LSP", desc="Language Server Protocol 操作", group=ToolGroup.SYSTEM, mode=ToolMode.DEFERRED, default=False),
# taskboard — all off by default; enable on dedicated scheduler members
ToolDef(name="ListBoardTasks", desc="列出任务板上的任务", group=ToolGroup.TASKBOARD, default=False),
ToolDef(name="ClaimTask", desc="认领一个任务板任务", group=ToolGroup.TASKBOARD, default=False),
diff --git a/core/agents/communication/chat_tool_service.py b/core/agents/communication/chat_tool_service.py
index 4496a97ef..5dd710581 100644
--- a/core/agents/communication/chat_tool_service.py
+++ b/core/agents/communication/chat_tool_service.py
@@ -152,33 +152,158 @@ def _fetch_by_range(self, chat_id: str, parsed: dict) -> list:
before=parsed["before"],
)
- def _register_chats(self, registry: ToolRegistry) -> None:
+ def _handle_chats(self, unread_only: bool = False, limit: int = 20) -> str:
+ eid = self._entity_id
+ chats = self._chat_service.list_chats_for_entity(eid)
+ if unread_only:
+ chats = [c for c in chats if c.get("unread_count", 0) > 0]
+ chats = chats[:limit]
+ if not chats:
+ return "No chats found."
+ lines = []
+ for c in chats:
+ others = [e for e in c.get("entities", []) if e["id"] != eid]
+ name = ", ".join(e["name"] for e in others) or "Unknown"
+ unread = c.get("unread_count", 0)
+ last = c.get("last_message")
+ last_preview = f' — last: "{last["content"][:50]}"' if last else ""
+ unread_str = f" ({unread} unread)" if unread > 0 else ""
+ is_group = len(others) >= 2
+ if is_group:
+ id_str = f" [chat_id: {c['id']}]"
+ else:
+ other_id = others[0]["id"] if others else ""
+ id_str = f" [entity_id: {other_id}]" if other_id else ""
+ lines.append(f"- {name}{id_str}{unread_str}{last_preview}")
+ return "\n".join(lines)
+
+ def _handle_chat_read(self, entity_id: str | None = None, chat_id: str | None = None, range: str | None = None) -> str:
eid = self._entity_id
+ if chat_id:
+ pass # use chat_id directly
+ elif entity_id:
+ chat_id = self._chat_entities.find_chat_between(eid, entity_id)
+ if not chat_id:
+ target = self._entities.get_by_id(entity_id)
+ name = target.name if target else entity_id
+ return f"No chat history with {name}."
+ else:
+ return "Provide entity_id or chat_id."
+
+ # @@@range-dispatch — if range is provided, use it regardless of unread state.
+ if range:
+ try:
+ parsed = _parse_range(range)
+ except ValueError as e:
+ return str(e)
+ msgs = self._fetch_by_range(chat_id, parsed)
+ if not msgs:
+ return "No messages in that range."
+ # @@@range-marks-read — WORKAROUND: unblock chat_send by pushing
+ # last_read_at to now. This marks ALL messages as read, not just
+ # the requested range. Proper fix needs per-message read tracking
+ # instead of the current single-timestamp waterline model.
+ self._chat_entities.update_last_read(chat_id, eid, time.time())
+ return self._format_msgs(msgs, eid)
+
+ # @@@read-unread-only — default to unread messages only.
+ msgs = self._messages.list_unread(chat_id, eid)
+ if msgs:
+ self._chat_entities.update_last_read(chat_id, eid, time.time())
+ return self._format_msgs(msgs, eid)
+
+ # Nothing unread — prompt agent to use range parameter
+ return (
+ "No unread messages. To read history, call again with range:\n"
+ " range='-10:-1' (last 10 messages)\n"
+ " range='-5:' (last 5 messages)\n"
+ " range='-1h:' (last hour)\n"
+ " range='-2d:-1d' (yesterday)\n"
+ " range='2026-03-20:2026-03-22' (date range)"
+ )
- def handle(unread_only: bool = False, limit: int = 20) -> str:
- chats = self._chat_service.list_chats_for_entity(eid)
- if unread_only:
- chats = [c for c in chats if c.get("unread_count", 0) > 0]
- chats = chats[:limit]
- if not chats:
- return "No chats found."
- lines = []
- for c in chats:
- others = [e for e in c.get("entities", []) if e["id"] != eid]
- name = ", ".join(e["name"] for e in others) or "Unknown"
- unread = c.get("unread_count", 0)
- last = c.get("last_message")
- last_preview = f' — last: "{last["content"][:50]}"' if last else ""
- unread_str = f" ({unread} unread)" if unread > 0 else ""
- is_group = len(others) >= 2
- if is_group:
- id_str = f" [chat_id: {c['id']}]"
- else:
- other_id = others[0]["id"] if others else ""
- id_str = f" [entity_id: {other_id}]" if other_id else ""
- lines.append(f"- {name}{id_str}{unread_str}{last_preview}")
- return "\n".join(lines)
+ def _handle_chat_send(
+ self,
+ content: str,
+ entity_id: str | None = None,
+ chat_id: str | None = None,
+ signal: str = "open",
+ mentions: list[str] | None = None,
+ ) -> str:
+ eid = self._entity_id
+ # @@@read-before-write — resolve chat_id, then check unread
+ resolved_chat_id = chat_id
+ target_name = "chat"
+
+ if chat_id:
+ if not self._chat_entities.is_entity_in_chat(chat_id, eid):
+ raise RuntimeError(f"You are not a member of chat {chat_id}")
+ elif entity_id:
+ if entity_id == eid:
+ raise RuntimeError("Cannot send a message to yourself.")
+ target = self._entities.get_by_id(entity_id)
+ if not target:
+ raise RuntimeError(f"Entity not found: {entity_id}")
+ target_name = target.name
+ resolved_chat_id = self._chat_entities.find_chat_between(eid, entity_id)
+ if not resolved_chat_id:
+ # New chat — no unread possible, create and send
+ chat = self._chat_service.find_or_create_chat([eid, entity_id])
+ resolved_chat_id = chat.id
+ else:
+ raise RuntimeError("Provide entity_id (for 1:1) or chat_id (for group)")
+ # @@@read-before-write-gate — reject if unread messages exist
+ unread = self._messages.count_unread(resolved_chat_id, eid)
+ if unread > 0:
+ raise RuntimeError(f"You have {unread} unread message(s). Call chat_read(chat_id='{resolved_chat_id}') first.")
+
+ # Append signal to content (for chat_read) + pass through chain (for notification)
+ effective_signal = signal if signal in ("yield", "close") else None
+ if effective_signal:
+ content = f"{content}\n[signal: {effective_signal}]"
+
+ self._chat_service.send_message(resolved_chat_id, eid, content, mentions, signal=effective_signal)
+ return f"Message sent to {target_name}."
+
+ def _handle_chat_search(self, query: str, entity_id: str | None = None) -> str:
+ eid = self._entity_id
+ chat_id = None
+ if entity_id:
+ chat_id = self._chat_entities.find_chat_between(eid, entity_id)
+ results = self._messages.search(query, chat_id=chat_id, limit=20)
+ if not results:
+ return f"No messages matching '{query}'."
+ lines = []
+ for m in results:
+ sender = self._entities.get_by_id(m.sender_entity_id)
+ name = sender.name if sender else "unknown"
+ lines.append(f"[{name}] {m.content[:100]}")
+ return "\n".join(lines)
+
+ def _handle_directory(self, search: str | None = None, type: str | None = None) -> str:
+ eid = self._entity_id
+ all_entities = self._entities.list_all()
+ entities = [e for e in all_entities if e.id != eid]
+ if type:
+ entities = [e for e in entities if e.type == type]
+ if search:
+ q = search.lower()
+ entities = [e for e in entities if q in e.name.lower()]
+ if not entities:
+ return "No entities found."
+ lines = []
+ for e in entities:
+ member = self._members.get_by_id(e.member_id)
+ owner_info = ""
+ if e.type == "agent" and member and member.owner_id:
+ owner_member = self._members.get_by_id(member.owner_id)
+ if owner_member:
+ owner_info = f" (owner: {owner_member.name})"
+ lines.append(f"- {e.name} [{e.type}] entity_id={e.id}{owner_info}")
+ return "\n".join(lines)
+
+ def _register_chats(self, registry: ToolRegistry) -> None:
registry.register(
ToolEntry(
name="chats",
@@ -198,58 +323,15 @@ def handle(unread_only: bool = False, limit: int = 20) -> str:
},
},
},
- handler=handle,
+ handler=self._handle_chats,
source="chat",
+ search_hint="list chats conversations unread messages",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
def _register_chat_read(self, registry: ToolRegistry) -> None:
- eid = self._entity_id
-
- def handle(entity_id: str | None = None, chat_id: str | None = None, range: str | None = None) -> str:
- if chat_id:
- pass # use chat_id directly
- elif entity_id:
- chat_id = self._chat_entities.find_chat_between(eid, entity_id)
- if not chat_id:
- target = self._entities.get_by_id(entity_id)
- name = target.name if target else entity_id
- return f"No chat history with {name}."
- else:
- return "Provide entity_id or chat_id."
-
- # @@@range-dispatch — if range is provided, use it regardless of unread state.
- if range:
- try:
- parsed = _parse_range(range)
- except ValueError as e:
- return str(e)
- msgs = self._fetch_by_range(chat_id, parsed)
- if not msgs:
- return "No messages in that range."
- # @@@range-marks-read — WORKAROUND: unblock chat_send by pushing
- # last_read_at to now. This marks ALL messages as read, not just
- # the requested range. Proper fix needs per-message read tracking
- # instead of the current single-timestamp waterline model.
- self._chat_entities.update_last_read(chat_id, eid, time.time())
- return self._format_msgs(msgs, eid)
-
- # @@@read-unread-only — default to unread messages only.
- msgs = self._messages.list_unread(chat_id, eid)
- if msgs:
- self._chat_entities.update_last_read(chat_id, eid, time.time())
- return self._format_msgs(msgs, eid)
-
- # Nothing unread — prompt agent to use range parameter
- return (
- "No unread messages. To read history, call again with range:\n"
- " range='-10:-1' (last 10 messages)\n"
- " range='-5:' (last 5 messages)\n"
- " range='-1h:' (last hour)\n"
- " range='-2d:-1d' (yesterday)\n"
- " range='2026-03-20:2026-03-22' (date range)"
- )
-
registry.register(
ToolEntry(
name="chat_read",
@@ -277,56 +359,15 @@ def handle(entity_id: str | None = None, chat_id: str | None = None, range: str
},
},
},
- handler=handle,
+ handler=self._handle_chat_read,
source="chat",
+ search_hint="read chat messages history conversation",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
def _register_chat_send(self, registry: ToolRegistry) -> None:
- eid = self._entity_id
-
- def handle(
- content: str,
- entity_id: str | None = None,
- chat_id: str | None = None,
- signal: str = "open",
- mentions: list[str] | None = None,
- ) -> str:
- # @@@read-before-write — resolve chat_id, then check unread
- resolved_chat_id = chat_id
- target_name = "chat"
-
- if chat_id:
- if not self._chat_entities.is_entity_in_chat(chat_id, eid):
- raise RuntimeError(f"You are not a member of chat {chat_id}")
- elif entity_id:
- if entity_id == eid:
- raise RuntimeError("Cannot send a message to yourself.")
- target = self._entities.get_by_id(entity_id)
- if not target:
- raise RuntimeError(f"Entity not found: {entity_id}")
- target_name = target.name
- resolved_chat_id = self._chat_entities.find_chat_between(eid, entity_id)
- if not resolved_chat_id:
- # New chat — no unread possible, create and send
- chat = self._chat_service.find_or_create_chat([eid, entity_id])
- resolved_chat_id = chat.id
- else:
- raise RuntimeError("Provide entity_id (for 1:1) or chat_id (for group)")
-
- # @@@read-before-write-gate — reject if unread messages exist
- unread = self._messages.count_unread(resolved_chat_id, eid)
- if unread > 0:
- raise RuntimeError(f"You have {unread} unread message(s). Call chat_read(chat_id='{resolved_chat_id}') first.")
-
- # Append signal to content (for chat_read) + pass through chain (for notification)
- effective_signal = signal if signal in ("yield", "close") else None
- if effective_signal:
- content = f"{content}\n[signal: {effective_signal}]"
-
- self._chat_service.send_message(resolved_chat_id, eid, content, mentions, signal=effective_signal)
- return f"Message sent to {target_name}."
-
registry.register(
ToolEntry(
name="chat_send",
@@ -363,28 +404,13 @@ def handle(
"required": ["content"],
},
},
- handler=handle,
+ handler=self._handle_chat_send,
source="chat",
+ search_hint="send message reply chat entity",
)
)
def _register_chat_search(self, registry: ToolRegistry) -> None:
- eid = self._entity_id
-
- def handle(query: str, entity_id: str | None = None) -> str:
- chat_id = None
- if entity_id:
- chat_id = self._chat_entities.find_chat_between(eid, entity_id)
- results = self._messages.search(query, chat_id=chat_id, limit=20)
- if not results:
- return f"No messages matching '{query}'."
- lines = []
- for m in results:
- sender = self._entities.get_by_id(m.sender_entity_id)
- name = sender.name if sender else "unknown"
- lines.append(f"[{name}] {m.content[:100]}")
- return "\n".join(lines)
-
registry.register(
ToolEntry(
name="chat_search",
@@ -404,35 +430,15 @@ def handle(query: str, entity_id: str | None = None) -> str:
"required": ["query"],
},
},
- handler=handle,
+ handler=self._handle_chat_search,
source="chat",
+ search_hint="search messages query chat history",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
def _register_directory(self, registry: ToolRegistry) -> None:
- eid = self._entity_id
-
- def handle(search: str | None = None, type: str | None = None) -> str:
- all_entities = self._entities.list_all()
- entities = [e for e in all_entities if e.id != eid]
- if type:
- entities = [e for e in entities if e.type == type]
- if search:
- q = search.lower()
- entities = [e for e in entities if q in e.name.lower()]
- if not entities:
- return "No entities found."
- lines = []
- for e in entities:
- member = self._members.get_by_id(e.member_id)
- owner_info = ""
- if e.type == "agent" and member and member.owner_id:
- owner_member = self._members.get_by_id(member.owner_id)
- if owner_member:
- owner_info = f" (owner: {owner_member.name})"
- lines.append(f"- {e.name} [{e.type}] entity_id={e.id}{owner_info}")
- return "\n".join(lines)
-
registry.register(
ToolEntry(
name="directory",
@@ -448,7 +454,10 @@ def handle(search: str | None = None, type: str | None = None) -> str:
},
},
},
- handler=handle,
+ handler=self._handle_directory,
source="chat",
+ search_hint="browse entity directory find agent human",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
diff --git a/core/agents/communication/delivery.py b/core/agents/communication/delivery.py
index 8a92d2dc8..9b2acf962 100644
--- a/core/agents/communication/delivery.py
+++ b/core/agents/communication/delivery.py
@@ -7,6 +7,7 @@
from __future__ import annotations
+import functools
import logging
from typing import Any
@@ -41,18 +42,20 @@ def _deliver(
loop,
)
- def _on_done(f):
- exc = f.exception()
- if exc:
- logger.error("[delivery] async delivery failed for %s: %s", entity.id, exc, exc_info=exc)
- else:
- logger.info("[delivery] async delivery completed for %s", entity.id)
-
- future.add_done_callback(_on_done)
+ future.add_done_callback(functools.partial(_log_delivery_result, entity.id))
return _deliver
+def _log_delivery_result(entity_id: str, f: Any) -> None:
+ """Done-callback for async delivery futures."""
+ exc = f.exception()
+ if exc:
+ logger.error("[delivery] async delivery failed for %s: %s", entity_id, exc, exc_info=exc)
+ else:
+ logger.info("[delivery] async delivery completed for %s", entity_id)
+
+
async def _async_deliver(
app: Any,
entity: EntityRow,
diff --git a/core/agents/service.py b/core/agents/service.py
index e7baff89b..20ae51f61 100644
--- a/core/agents/service.py
+++ b/core/agents/service.py
@@ -21,20 +21,85 @@
logger = logging.getLogger(__name__)
+# ── Sub-agent tool filtering (CC alignment) ──────────────────────────────────
+# Tools that sub-agents must never access (prevents controlling parent).
+AGENT_DISALLOWED: set[str] = {"TaskOutput", "TaskStop", "Agent"}
+
+# Per-type allowed tool sets. Tools not in the set are blocked.
+EXPLORE_ALLOWED: set[str] = {"Read", "Grep", "Glob", "list_dir", "WebSearch", "WebFetch", "tool_search"}
+PLAN_ALLOWED: set[str] = EXPLORE_ALLOWED # plan agents are also read-only
+BASH_ALLOWED: set[str] = {"Bash", "Read", "Grep", "Glob", "list_dir", "tool_search"}
+
+
+def _get_tool_filters(subagent_type: str) -> tuple[set[str], set[str] | None]:
+ """Return (extra_blocked_tools, allowed_tools) for a sub-agent type.
+
+ For explore/plan/bash: use allowed_tools whitelist (ToolRegistry skips unmatched).
+ For general: only block AGENT_DISALLOWED, no whitelist.
+ """
+ agent_type = subagent_type.lower()
+ allowed_map: dict[str, set[str]] = {
+ "explore": EXPLORE_ALLOWED,
+ "plan": PLAN_ALLOWED,
+ "bash": BASH_ALLOWED,
+ }
+
+ if agent_type in allowed_map:
+ return AGENT_DISALLOWED, allowed_map[agent_type]
+
+ # general: only block parent-controlling tools, no whitelist
+ return AGENT_DISALLOWED, None
+
+
+def _filter_fork_messages(messages: list) -> list:
+ """Filter parent messages for forkContext sub-agent spawning.
+
+ Equivalent to CC's yF0: removes assistant messages whose tool_use blocks
+ have no matching tool_result in a subsequent user message (orphan tool_use).
+ Orphan tool_use blocks cause Anthropic API validation errors.
+ """
+ # Collect all tool_use_ids that have a corresponding tool_result
+ answered: set[str] = set()
+ for msg in messages:
+ # ToolMessage or user message with tool_result content
+ tool_call_id = getattr(msg, "tool_call_id", None)
+ if tool_call_id:
+ answered.add(tool_call_id)
+ content = getattr(msg, "content", None)
+ if isinstance(content, list):
+ for block in content:
+ if isinstance(block, dict) and block.get("type") == "tool_result":
+ tid = block.get("tool_use_id") or block.get("tool_call_id")
+ if tid:
+ answered.add(tid)
+
+ result = []
+ for msg in messages:
+ content = getattr(msg, "content", None)
+ if isinstance(content, list):
+ tool_uses = [b for b in content if isinstance(b, dict) and b.get("type") == "tool_use"]
+ if tool_uses and any(b.get("id") not in answered for b in tool_uses):
+                continue  # skip assistant msg with unanswered tool_use — NOTE(review): any *answered* tool_results for it remain and become orphans on the other side; confirm the API tolerates this
+ result.append(msg)
+ return result
+
AGENT_SCHEMA = {
"name": "Agent",
"description": (
- "Launch a new agent to handle complex tasks autonomously. "
- "Use subagent_type to select a specialized agent, or omit for default. "
- "Agents run independently with their own tool stack."
+ "Launch a sub-agent for independent task execution. "
+ "Types: explore (read-only codebase search), plan (architecture design, read-only), "
+ "bash (shell commands only), general (full tool access). "
+ "Use for: multi-step tasks, parallel work, tasks needing isolation. "
+ "Do NOT use for simple file reads or single grep searches — use the tools directly."
),
"parameters": {
"type": "object",
"properties": {
"subagent_type": {
"type": "string",
- "description": "Type of agent to spawn (e.g. 'Explore', 'Coder'). Omit for general-purpose.",
+ "enum": ["explore", "plan", "general", "bash"],
+ "description": "Type of agent to spawn. Omit for general-purpose.",
},
"prompt": {
"type": "string",
@@ -60,6 +125,16 @@
"type": "integer",
"description": "Maximum turns the agent can take",
},
+ "fork_context": {
+ "type": "boolean",
+ "default": False,
+ "description": (
+ "Inherit parent conversation history as read-only context. "
+ "Use when the sub-agent needs background from the parent's work. "
+ "Adds a ### ENTERING SUB-AGENT ROUTINE ### marker so the sub-agent "
+ "knows which messages are context vs its actual task."
+ ),
+ },
},
"required": ["prompt"],
},
@@ -67,7 +142,7 @@
TASK_OUTPUT_SCHEMA = {
"name": "TaskOutput",
- "description": "Get the output of a background agent task by its task_id.",
+ "description": "Get output of a background task (agent or bash). Blocks until task completes by default. Returns full text output or error.",
"parameters": {
"type": "object",
"properties": {
@@ -82,7 +157,7 @@
TASK_STOP_SCHEMA = {
"name": "TaskStop",
- "description": "Stop a running background agent task.",
+ "description": "Cancel a running background task. Sends cancellation signal; task may take a moment to stop.",
"parameters": {
"type": "object",
"properties": {
@@ -185,6 +260,7 @@ def __init__(
schema=AGENT_SCHEMA,
handler=self._handle_agent,
source="AgentService",
+ search_hint="launch sub-agent spawn parallel task independent",
)
)
tool_registry.register(
@@ -194,6 +270,9 @@ def __init__(
schema=TASK_OUTPUT_SCHEMA,
handler=self._handle_task_output,
source="AgentService",
+ search_hint="get background task output result poll",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
tool_registry.register(
@@ -203,6 +282,7 @@ def __init__(
schema=TASK_STOP_SCHEMA,
handler=self._handle_task_stop,
source="AgentService",
+ search_hint="stop cancel background task agent",
)
)
@@ -214,6 +294,7 @@ async def _handle_agent(
description: str | None = None,
run_in_background: bool = False,
max_turns: int | None = None,
+ fork_context: bool = False,
) -> str:
"""Spawn an independent LeonAgent and run it with the given prompt."""
from sandbox.thread_context import get_current_thread_id
@@ -245,6 +326,7 @@ async def _handle_agent(
max_turns,
description=description or "",
run_in_background=run_in_background,
+ fork_context=fork_context,
)
)
if run_in_background:
@@ -281,6 +363,7 @@ async def _run_agent(
max_turns: int | None,
description: str = "",
run_in_background: bool = False,
+ fork_context: bool = False,
) -> str:
"""Create and run an independent LeonAgent, collect its text output."""
# Isolate this sub-agent from the parent's LangChain callback chain.
@@ -316,11 +399,44 @@ async def _run_agent(
agent = None
try:
- agent = create_leon_agent(
- model_name=self._model_name,
- workspace_root=self._workspace_root,
- verbose=False,
- )
+ # Sub-agent context trimming: each spawn creates a fresh LeonAgent
+ # with its own _build_system_prompt(). No CLAUDE.md content or
+ # gitStatus is injected into the prompt pipeline (core/runtime/prompts
+ # has no such injection). Therefore explore/plan/bash sub-agents
+ # already run lightweight — no extra trimming is needed.
+ #
+ # Try to use context fork from parent agent's BootstrapConfig.
+ # Falls back to create_leon_agent when bootstrap is not available.
+ # Compute tool filtering for this sub-agent type
+ extra_blocked, allowed = _get_tool_filters(subagent_type)
+
+ try:
+                # Alias the import: a plain `import fork_context` rebinds the
+                # function-local name and shadows the `fork_context: bool`
+                # parameter, making the later `if fork_context:` always truthy.
+                from core.runtime.fork import fork_context as _fork_bootstrap
+
+                # _parent_bootstrap is injected by LeonAgent when building AgentService.
+                parent_bootstrap = getattr(self, "_parent_bootstrap", None)
+                if parent_bootstrap is not None:
+                    child_bootstrap = _fork_bootstrap(parent_bootstrap)
+ agent = create_leon_agent(
+ model_name=child_bootstrap.model_name,
+ workspace_root=child_bootstrap.workspace_root,
+ extra_blocked_tools=extra_blocked,
+ allowed_tools=allowed,
+ verbose=False,
+ )
+ else:
+ raise AttributeError("no parent bootstrap")
+ except (AttributeError, ImportError):
+ agent = create_leon_agent(
+ model_name=self._model_name,
+ workspace_root=self._workspace_root,
+ extra_blocked_tools=extra_blocked,
+ allowed_tools=allowed,
+ verbose=False,
+ )
# In async context LeonAgent defers checkpointer init; call ainit() to
# ensure state is persisted (and loadable via GET /api/threads/{thread_id}).
await agent.ainit()
@@ -354,8 +470,24 @@ async def _run_agent(
config = {"configurable": {"thread_id": thread_id}}
output_parts: list[str] = []
+ # Build initial input — with or without forked parent context
+ if fork_context:
+ from sandbox.thread_context import get_current_messages
+ parent_msgs = get_current_messages()
+ _FORK_MARKER = (
+ "\n\n### ENTERING SUB-AGENT ROUTINE ###\n"
+ "Messages above are from the parent thread (read-only context).\n"
+ "Only complete the specific task assigned below.\n\n"
+ )
+ initial_messages: list = [
+ *_filter_fork_messages(parent_msgs),
+ {"role": "user", "content": _FORK_MARKER + prompt},
+ ]
+ else:
+ initial_messages = [{"role": "user", "content": prompt}]
+
async for chunk in agent.agent.astream(
- {"messages": [{"role": "user", "content": prompt}]},
+ {"messages": initial_messages},
config=config,
stream_mode="updates",
):
diff --git a/core/runtime/agent.py b/core/runtime/agent.py
index 962451ebb..5d1e62ba9 100644
--- a/core/runtime/agent.py
+++ b/core/runtime/agent.py
@@ -18,12 +18,13 @@
All paths must be absolute. Full security mechanisms and audit logging.
"""
+import concurrent.futures
+import functools
import os
import threading
from pathlib import Path
from typing import Any
-from langchain.agents import create_agent
from langchain.chat_models import init_chat_model
from langchain_core.messages import SystemMessage
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
@@ -62,8 +63,11 @@
from core.runtime.middleware.spill_buffer import SpillBufferMiddleware # noqa: E402
# New architecture: ToolRegistry + ToolRunner + Services
+from core.runtime.cleanup import CleanupRegistry # noqa: E402
+from core.runtime.loop import QueryLoop # noqa: E402
from core.runtime.registry import ToolRegistry # noqa: E402
from core.runtime.runner import ToolRunner # noqa: E402
+from core.runtime.state import BootstrapConfig # noqa: E402
from core.runtime.validator import ToolValidator # noqa: E402
# Hooks (used by Services)
@@ -86,6 +90,20 @@
apply_usage_patches()
+def _lookup_wechat_conn(eid: str):
+ """Lazy WeChat connection lookup by owner entity ID.
+
+ Called at tool invocation time — app.state may not be populated at registration.
+ """
+ try:
+ from backend.web.main import app # noqa: PLC0415
+
+ registry = getattr(app.state, "wechat_registry", None)
+ return registry.get(eid) if registry else None
+ except Exception:
+ return None
+
+
class LeonAgent:
"""
Leon Agent - AI Coding Assistant
@@ -122,6 +140,8 @@ def __init__(
queue_manager: MessageQueueManager | None = None,
chat_repos: dict | None = None,
extra_allowed_paths: list[str] | None = None,
+ extra_blocked_tools: set[str] | None = None,
+ allowed_tools: set[str] | None = None,
verbose: bool = False,
):
"""
@@ -215,15 +235,18 @@ def __init__(
# Initialize checkpointer and MCP tools
self._aiosqlite_conn, mcp_tools = self._init_async_components()
- # If in async context, mark as needing async initialization
- self._needs_async_init = self._aiosqlite_conn is None
-
- # Set checkpointer to None if in async context (will be initialized later)
- if self._needs_async_init:
+ # Set checkpointer to None if in async context (will be set by ainit())
+ if self._aiosqlite_conn is None:
self.checkpointer = None
# Initialize ToolRegistry and Services (new architecture)
- self._tool_registry = ToolRegistry(blocked_tools=self._get_member_blocked_tools())
+ blocked = self._get_member_blocked_tools()
+ if extra_blocked_tools:
+ blocked = blocked | extra_blocked_tools
+ self._tool_registry = ToolRegistry(
+ blocked_tools=blocked,
+ allowed_tools=allowed_tools,
+ )
self._init_services()
# Build middleware stack
@@ -260,13 +283,28 @@ def __init__(
f"not to the chat — only chat_send() delivers to the other party.\n"
)
- # Create agent
- self.agent = create_agent(
+ # Build BootstrapConfig for sub-agent forking
+ self._bootstrap = BootstrapConfig(
+ workspace_root=self.workspace_root,
+ model_name=self.model_name,
+ api_key=self.api_key,
+ block_dangerous_commands=self.block_dangerous_commands,
+ block_network_commands=self.block_network_commands,
+ enable_audit_log=self.enable_audit_log,
+ enable_web_tools=self.enable_web_tools,
+ allowed_file_extensions=self.allowed_file_extensions,
+ )
+ # Inject bootstrap into AgentService so sub-agents can fork from it
+ if hasattr(self, "_agent_service"):
+ self._agent_service._parent_bootstrap = self._bootstrap
+
+ # Create agent via QueryLoop (replaces LangGraph create_agent)
+ self.agent = QueryLoop(
model=self.model,
- tools=mcp_tools,
system_prompt=SystemMessage(content=[{"type": "text", "text": self.system_prompt}]),
middleware=middleware,
- checkpointer=self.checkpointer if not self._needs_async_init else None,
+ checkpointer=self.checkpointer,
+ registry=self._tool_registry,
)
# Get runtime from MonitorMiddleware
@@ -283,11 +321,18 @@ def __init__(
print("[LeonAgent] Initialized successfully")
print(f"[LeonAgent] Workspace: {self.workspace_root}")
print(f"[LeonAgent] Audit log: {self.enable_audit_log}")
- if self._needs_async_init:
+ if self.checkpointer is None:
print("[LeonAgent] Note: Async components need initialization via ainit()")
- # Mark agent as ready (if not needing async init)
- if not self._needs_async_init:
+ # Wire CleanupRegistry for priority-ordered resource teardown
+ self._cleanup_registry = CleanupRegistry()
+ self._cleanup_registry.register(self._cleanup_sandbox, priority=2)
+ self._cleanup_registry.register(self._mark_terminated, priority=3)
+ self._cleanup_registry.register(self._cleanup_mcp_client, priority=4)
+ self._cleanup_registry.register(self._cleanup_sqlite_connection, priority=5)
+
+ # Mark agent as ready (checkpointer is None when async init still pending)
+ if self.checkpointer is not None:
self._monitor_middleware.mark_ready()
async def ainit(self):
@@ -297,7 +342,7 @@ async def ainit(self):
agent = LeonAgent(sandbox=sandbox)
await agent.ainit()
"""
- if not self._needs_async_init:
+ if self.checkpointer is not None:
return # Already initialized
# Initialize async components
@@ -307,8 +352,6 @@ async def ainit(self):
# Update agent with checkpointer
self.agent.checkpointer = self.checkpointer
- # Mark as initialized
- self._needs_async_init = False
self._monitor_middleware.mark_ready()
if self.verbose:
@@ -712,11 +755,25 @@ def update_observation(self, **overrides) -> None:
print(f"[LeonAgent] Observation updated: active={self._observation_config.active}")
def close(self):
- """Clean up resources."""
- self._cleanup_sandbox()
- self._mark_terminated()
- self._cleanup_mcp_client()
- self._cleanup_sqlite_connection()
+ """Clean up resources via CleanupRegistry (priority-ordered).
+
+ Falls back to direct cleanup if CleanupRegistry is not initialized.
+ """
+ if hasattr(self, "_cleanup_registry"):
+ self._run_async_cleanup(self._cleanup_registry.run_cleanup, "CleanupRegistry")
+ else:
+ # Fallback for edge cases where __init__ did not complete fully
+ for step_name, step_fn in [
+ ("sandbox", self._cleanup_sandbox),
+ ("monitor", self._mark_terminated),
+ ("MCP client", self._cleanup_mcp_client),
+ ("SQLite connection", self._cleanup_sqlite_connection),
+ ]:
+ try:
+ step_fn()
+ except Exception as e:
+ print(f"[LeonAgent] {step_name} cleanup error: {e}")
+
def _cleanup_sandbox(self) -> None:
"""Clean up sandbox resources."""
@@ -731,32 +788,29 @@ def _mark_terminated(self) -> None:
if hasattr(self, "_monitor_middleware"):
self._monitor_middleware.mark_terminated()
+ _CLEANUP_TIMEOUT: float = 10.0 # seconds; prevents hanging on stuck I/O
+
@staticmethod
def _run_async_cleanup(coro_factory, label: str) -> None:
import asyncio
try:
- running_loop = asyncio.get_running_loop()
+ asyncio.get_running_loop()
except RuntimeError:
- running_loop = None
-
- if running_loop is None:
asyncio.run(coro_factory())
return
- error: list[Exception] = []
-
- def _runner() -> None:
+        pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)  # no `with`: __exit__ joins the worker and would re-hang on timeout
+        future = pool.submit(asyncio.run, coro_factory())
         try:
-            asyncio.run(coro_factory())
+            future.result(timeout=LeonAgent._CLEANUP_TIMEOUT)
+            pool.shutdown(wait=False)  # worker already finished; join is instant
+        except concurrent.futures.TimeoutError:
+            pool.shutdown(wait=False, cancel_futures=True)  # abandon stuck worker (still joined at interpreter exit)
+            raise RuntimeError(f"{label} cleanup timed out after {LeonAgent._CLEANUP_TIMEOUT}s — "
+                               f"possible stuck I/O; resource abandoned to prevent hang")
         except Exception as exc:
-            error.append(exc)
-
-        thread = threading.Thread(target=_runner, daemon=True)
-        thread.start()
-        thread.join()
-        if error:
-            raise RuntimeError(f"{label} cleanup failed: {error[0]}") from error[0]
+            raise RuntimeError(f"{label} cleanup failed: {exc}") from exc
def _cleanup_mcp_client(self) -> None:
"""Clean up MCP client."""
@@ -770,29 +824,15 @@ def _cleanup_mcp_client(self) -> None:
self._mcp_client = None
def _cleanup_sqlite_connection(self) -> None:
- """Clean up SQLite connection.
-
- Properly closes aiosqlite connection using asyncio.run() to avoid
- hanging on process exit.
- """
+ """Clean up SQLite connection."""
if not hasattr(self, "_aiosqlite_conn") or not self._aiosqlite_conn:
return
-
+ conn = self._aiosqlite_conn
+ self._aiosqlite_conn = None
try:
- import asyncio
-
- # Close the connection asynchronously
- async def _close():
- if self._aiosqlite_conn:
- await self._aiosqlite_conn.close()
-
- # Use asyncio.run() to properly close the connection
- asyncio.run(_close())
+ self._run_async_cleanup(conn.close, "SQLite connection")
except Exception:
- # Ignore errors during cleanup
pass
- finally:
- self._aiosqlite_conn = None
def __del__(self):
self.close()
@@ -1049,23 +1089,25 @@ def _init_services(self) -> None:
try:
from core.tools.wechat.service import WeChatToolService
- def _get_wechat_conn(eid=owner_eid):
- """Lazy lookup — returns None if registry not on app.state yet."""
- try:
- from backend.web.main import app
-
- registry = getattr(app.state, "wechat_registry", None)
- return registry.get(eid) if registry else None
- except Exception:
- return None
-
self._wechat_tool_service = WeChatToolService(
registry=self._tool_registry,
- connection_fn=_get_wechat_conn,
+ connection_fn=functools.partial(_lookup_wechat_conn, owner_eid),
)
except ImportError:
self._wechat_tool_service = None
+ # LSP tools — DEFERRED, always registered, multilspy checked at call time
+ self._lsp_service = None
+ try:
+ from core.tools.lsp.service import LSPService
+
+ self._lsp_service = LSPService(
+ registry=self._tool_registry,
+ workspace_root=self.workspace_root,
+ )
+ except Exception as e:
+ logger.debug("[LeonAgent] LSPService init skipped: %s", e)
+
if self.verbose:
all_tools = self._tool_registry.list_all()
inline = [t for t in all_tools if t.mode.value == "inline"]
@@ -1170,154 +1212,47 @@ def _build_system_prompt(self) -> str:
return prompt
def _build_context_section(self) -> str:
- """Build the context section based on sandbox mode."""
- if self._sandbox.name != "local":
- env_label = self._sandbox.env_label
- working_dir = self._sandbox.working_dir
- if self._sandbox.name == "docker":
- mode_label = "Sandbox (isolated local container)"
- else:
- mode_label = "Sandbox (isolated cloud environment)"
- return f"""- Environment: {env_label}
-- Working Directory: {working_dir}
-- Mode: {mode_label}"""
- else:
- import platform
-
- os_name = platform.system()
- if os_name == "Windows":
- shell_name = "powershell"
- else:
- shell_name = os.environ.get("SHELL", "/bin/bash").split("/")[-1]
- return f"""- Workspace: `{self.workspace_root}`
-- OS: {os_name}
-- Shell: {shell_name}
-- Mode: Local"""
+ from core.runtime.prompts import build_context_section
- def _build_rules_section(self) -> str:
- """Build shared rules section for all modes."""
is_sandbox = self._sandbox.name != "local"
- working_dir = self._sandbox.working_dir if is_sandbox else self.workspace_root
-
- rules = []
-
- # Rule 1: Environment-specific
if is_sandbox:
- if self._sandbox.name == "docker":
- location_rule = "All file and command operations run in a local Docker container, NOT on the user's host filesystem."
- else:
- location_rule = "All file and command operations run in a remote sandbox, NOT on the user's local machine."
- rules.append(f"1. **Sandbox Environment**: {location_rule} The sandbox is an isolated Linux environment.")
- else:
- rules.append("1. **Workspace**: File operations are restricted to: " + str(self.workspace_root))
-
- # Rule 2: Absolute paths
- rules.append(f"""2. **Absolute Paths**: All file paths must be absolute paths.
- - ✅ Correct: `{working_dir}/project/test.py`
- - ❌ Wrong: `test.py` or `./test.py`""")
-
- # Rule 3: Security
- if is_sandbox:
- rules.append("3. **Security**: The sandbox is isolated. You can install packages, run any commands, and modify files freely.")
- else:
- rules.append("3. **Security**: Dangerous commands are blocked. All operations are logged.")
-
- # Rule 4: Tool priority
- rules.append(
- """4. **Tool Priority**: When a built-in tool and an MCP tool (`mcp__*`) have the same functionality, use the built-in tool."""
+ return build_context_section(
+ sandbox_name=self._sandbox.name,
+ sandbox_env_label=self._sandbox.env_label,
+ sandbox_working_dir=self._sandbox.working_dir,
+ )
+ import platform
+
+ os_name = platform.system()
+ shell_name = "powershell" if os_name == "Windows" else os.environ.get("SHELL", "/bin/bash").split("/")[-1]
+ return build_context_section(
+ sandbox_name="local",
+ workspace_root=str(self.workspace_root),
+ os_name=os_name,
+ shell_name=shell_name,
)
- # Rule 5: Dedicated tools over shell
- rules.append("""5. **Use Dedicated Tools Instead of Shell Commands**: Do NOT use `Bash` for tasks that have dedicated tools:
- - File search → use `Grep` (NOT `rg`, `grep`, or `find` via Bash)
- - File listing → use `Glob` (NOT `find` or `ls` via Bash)
- - File reading → use `Read` (NOT `cat`, `head`, `tail` via Bash)
- - File editing → use `Edit` (NOT `sed` or `awk` via Bash)
- - Reserve `Bash` for: git, package managers, build tools, tests, and other system operations.""")
-
- # Rule 6: Background task description
- rules.append("""6. **Background Task Description**: When using `Bash` or `Agent` with `run_in_background: true`, always include a clear `description` parameter. # noqa: E501
- - The description is shown to the user in the background task indicator.
- - Keep it concise (5–10 words), action-oriented, e.g. "Run test suite", "Analyze API codebase".
- - Without a description, the raw command or agent name is shown, which is hard to read.""")
+ def _build_rules_section(self) -> str:
+ from core.runtime.prompts import build_rules_section
- return "\n\n".join(rules)
+ is_sandbox = self._sandbox.name != "local"
+ working_dir = self._sandbox.working_dir if is_sandbox else str(self.workspace_root)
+ return build_rules_section(
+ is_sandbox=is_sandbox,
+ sandbox_name=self._sandbox.name,
+ working_dir=working_dir,
+ workspace_root=str(self.workspace_root),
+ )
def _build_base_prompt(self) -> str:
- """Build the base system prompt (context + rules), shared by all modes."""
- context = self._build_context_section()
- rules = self._build_rules_section()
+ from core.runtime.prompts import build_base_prompt
- return f"""You are a highly capable AI assistant with access to file and system tools.
-
-**Context:**
-{context}
-
-**Important Rules:**
-
-{rules}
-"""
+ return build_base_prompt(self._build_context_section(), self._build_rules_section())
def _build_common_prompt_sections(self) -> str:
- """Build common prompt sections for both sandbox and local modes."""
- prompt = """
-**Agent Tool (Sub-agent Orchestration):**
-
-Use the Agent tool to launch specialized sub-agents for complex tasks:
-- `explore`: Read-only codebase exploration. Use for: finding files, searching code, understanding implementations.
-- `plan`: Design implementation plans. Use for: architecture decisions, multi-step planning.
-- `bash`: Execute shell commands. Use for: git operations, running tests, system commands.
-- `general`: Full tool access. Use for: independent multi-step tasks requiring file modifications.
-
-When to use Agent:
-- Open-ended searches that may require multiple rounds of exploration
-- Tasks that can run independently while you continue other work
-- Complex operations that benefit from specialized focus
-
-When NOT to use Agent:
-- Simple file reads (use Read directly)
-- Specific searches with known patterns (use Grep directly)
-- Quick operations that don't need isolation
-
-**Todo Tools (Task Management):**
-
-Use Todo tools to track progress on complex, multi-step tasks:
-- `TaskCreate`: Create a new task with subject, description, and activeForm (present continuous for spinner)
-- `TaskList`: View all tasks and their status
-- `TaskGet`: Get full details of a specific task
-- `TaskUpdate`: Update task status (pending → in_progress → completed) or details
-
-When to use Todo:
-- Complex tasks with 3+ distinct steps
-- When the user provides multiple tasks to complete
-- To show progress on non-trivial work
-
-When NOT to use Todo:
-- Single, straightforward tasks
-- Trivial operations that don't need tracking
-"""
-
- # Add Skills section if skills are enabled
- skills_enabled = self.config.skills.enabled and self.config.skills.paths
-
- if skills_enabled:
- prompt += """
-**Skills (Specialized Knowledge):**
-
-Use the `load_skill` tool to access specialized domain knowledge and workflows:
-- Skills provide focused instructions for specific tasks (e.g., TDD, debugging, git workflows)
-- Call `load_skill(skill_name)` to load a skill's content into context
-- Available skills are listed in the load_skill tool description
+ from core.runtime.prompts import build_common_sections
-When to use load_skill:
-- When you need specialized guidance for a specific workflow
-- To access domain-specific best practices
-- When the user mentions a skill by name (e.g., "use TDD skill")
-
-Progressive disclosure: Skills are loaded on-demand to save tokens.
-"""
-
- return prompt
+ return build_common_sections(bool(self.config.skills.enabled and self.config.skills.paths))
def invoke(self, message: str, thread_id: str = "default") -> dict:
"""Invoke agent with a message (sync version).
diff --git a/core/runtime/cleanup.py b/core/runtime/cleanup.py
new file mode 100644
index 000000000..eb7e51733
--- /dev/null
+++ b/core/runtime/cleanup.py
@@ -0,0 +1,72 @@
+"""CleanupRegistry — priority-ordered async cleanup for LeonAgent lifecycle.
+
+Aligned with CC Pattern 5: Lifecycle & Cleanup.
+Priority numbers: lower = runs first.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import signal
+from collections.abc import Callable, Awaitable
+
+logger = logging.getLogger(__name__)
+
+
+class CleanupRegistry:
+ """Registry of async cleanup functions executed in priority order on shutdown.
+
+ Usage:
+ registry = CleanupRegistry()
+ registry.register(close_db, priority=1)
+ registry.register(close_sandbox, priority=2)
+ await registry.run_cleanup()
+ """
+
+ def __init__(self):
+ # List of (priority, fn) — not a dict because same priority can have multiple fns
+ self._entries: list[tuple[int, Callable[[], Awaitable[None] | None]]] = []
+ self._setup_signal_handlers()
+
+ def register(self, fn: Callable[[], Awaitable[None] | None], priority: int = 5) -> None:
+ """Register a cleanup function.
+
+ Args:
+ fn: Sync or async callable that releases resources.
+ priority: Execution order — lower number runs first (1 before 2).
+ """
+ self._entries.append((priority, fn))
+
+ async def run_cleanup(self) -> None:
+ """Execute all registered cleanup functions in priority order.
+
+ Runs sequentially (not gathered) so failures are isolated.
+ A failing function is logged but does not prevent later functions from running.
+ """
+ sorted_entries = sorted(self._entries, key=lambda x: x[0])
+ for priority, fn in sorted_entries:
+ try:
+ result = fn()
+ if asyncio.iscoroutine(result):
+ await result
+ except Exception:
+ logger.exception("CleanupRegistry: error in cleanup fn %s (priority=%d)", fn, priority)
+
+ def _setup_signal_handlers(self) -> None:
+ """Register SIGINT/SIGTERM handlers to trigger async cleanup."""
+ try:
+            loop = asyncio.get_running_loop()
+ except RuntimeError:
+ return # No running loop yet — signal handlers set up later
+
+ for sig in (signal.SIGINT, signal.SIGTERM):
+ try:
+ loop.add_signal_handler(sig, self._handle_signal)
+ except (NotImplementedError, RuntimeError):
+ # Windows or non-main thread — skip signal handler setup
+ pass
+
+ def _handle_signal(self) -> None:
+        loop = asyncio.get_running_loop()
+        self._cleanup_task = loop.create_task(self.run_cleanup())
diff --git a/core/runtime/fork.py b/core/runtime/fork.py
new file mode 100644
index 000000000..f3d99e0c7
--- /dev/null
+++ b/core/runtime/fork.py
@@ -0,0 +1,41 @@
+"""Context fork for sub-agent spawning.
+
+When a sub-agent is spawned, it inherits workspace/model/permission configuration
+from the parent but gets its own isolated messages and session identity.
+
+Aligned with CC createSubagentContext() field-by-field fork table.
+"""
+
+from __future__ import annotations
+
+import uuid
+
+from .state import BootstrapConfig
+
+
+def fork_context(parent: BootstrapConfig) -> BootstrapConfig:
+ """Create a child BootstrapConfig for a sub-agent.
+
+ Inherits all workspace identity, model settings, and security flags
+ from parent. Generates a fresh session_id and sets parent_session_id.
+ Messages, cost, and turn_count live in AppState — not here.
+ """
+ return BootstrapConfig(
+ workspace_root=parent.workspace_root,
+ model_name=parent.model_name,
+ api_key=parent.api_key,
+ block_dangerous_commands=parent.block_dangerous_commands,
+ block_network_commands=parent.block_network_commands,
+ enable_audit_log=parent.enable_audit_log,
+ enable_web_tools=parent.enable_web_tools,
+ allowed_file_extensions=parent.allowed_file_extensions,
+ extra_allowed_paths=parent.extra_allowed_paths,
+ max_turns=parent.max_turns,
+ # Fresh session identity
+ session_id=uuid.uuid4().hex,
+ parent_session_id=parent.session_id,
+ # Model settings
+ model_provider=parent.model_provider,
+ base_url=parent.base_url,
+ context_limit=parent.context_limit,
+ )
diff --git a/core/runtime/loop.py b/core/runtime/loop.py
new file mode 100644
index 000000000..626a1eba6
--- /dev/null
+++ b/core/runtime/loop.py
@@ -0,0 +1,409 @@
+"""QueryLoop — self-managing agentic tool loop replacing LangGraph create_agent.
+
+Implements CC Pattern 1: Agentic Tool Loop (queryLoop).
+
+Design:
+- AsyncGenerator that alternates LLM sampling and tool execution.
+- Exposes the same .astream(input, config, stream_mode) interface as CompiledStateGraph.
+- Middleware chain (SpillBuffer/Monitor/PromptCaching/Memory/Steering/ToolRunner) is
+ preserved exactly — awrap_model_call and awrap_tool_call pass through in order.
+- is_concurrency_safe tools execute in parallel; others execute serially.
+- Checkpointer (AsyncSqliteSaver) stores/restores message history across calls.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import Any, AsyncGenerator
+
+from langchain.agents.middleware.types import (
+ AgentMiddleware,
+ ModelRequest,
+ ModelResponse,
+ ToolCallRequest,
+)
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
+
+from .registry import ToolRegistry
+
+logger = logging.getLogger(__name__)
+
+_NOOP_HANDLER: Any = None # placeholder for innermost "handler" in middleware chain
+
+
+class QueryLoop:
+ """Self-managing query loop replacing create_agent.
+
+ The .astream() method is an AsyncGenerator that yields dicts compatible
+ with LangGraph's stream_mode="updates":
+ {"agent": {"messages": [AIMessage(...)]}}
+ {"tools": {"messages": [ToolMessage(...), ...]}}
+
+ The checkpointer attribute is set post-construction (mirrors create_agent pattern).
+ """
+
+ def __init__(
+ self,
+ model: Any,
+ system_prompt: SystemMessage,
+ middleware: list[AgentMiddleware],
+ checkpointer: Any,
+ registry: ToolRegistry,
+ max_turns: int = 100,
+ ):
+ self.model = model
+ self.system_prompt = system_prompt
+ self.middleware = middleware
+ self.checkpointer = checkpointer
+ self._registry = registry
+ self.max_turns = max_turns
+
+ # -------------------------------------------------------------------------
+ # Public streaming interface (LangGraph-compatible)
+ # -------------------------------------------------------------------------
+
+ async def astream(
+ self,
+ input: dict,
+ config: dict | None = None,
+ stream_mode: str = "updates",
+ ) -> AsyncGenerator[dict, None]:
+ """Stream agent execution chunks compatible with LangGraph stream_mode='updates'."""
+ config = config or {}
+ thread_id = config.get("configurable", {}).get("thread_id", "default")
+
+ # Set thread context so MemoryMiddleware can find thread_id via ContextVar
+ from sandbox.thread_context import set_current_thread_id
+ set_current_thread_id(thread_id)
+
+ # Load message history from checkpointer
+ messages = await self._load_messages(thread_id)
+
+ # Parse and append new input messages
+ new_msgs = self._parse_input(input)
+ messages.extend(new_msgs)
+
+ turn = 0
+ while turn < self.max_turns:
+ turn += 1
+
+ # --- Call model through middleware chain ---
+ response = await self._invoke_model(messages, config)
+
+ # Extract AI message from response
+ ai_messages = [m for m in response.result if isinstance(m, AIMessage)]
+ if not ai_messages:
+ # No AI message — unexpected; treat as terminal
+ break
+ ai_msg = ai_messages[0]
+
+ # Yield agent update (stream_mode="updates" format)
+ yield {"agent": {"messages": [ai_msg]}}
+
+ # Check for tool calls
+ tool_calls = getattr(ai_msg, "tool_calls", None) or []
+ if not tool_calls:
+ # Also check additional_kwargs for older message formats
+ tool_calls = ai_msg.additional_kwargs.get("tool_calls", [])
+
+ if not tool_calls:
+ # No tool calls → agent is done
+ messages.append(ai_msg)
+ break
+
+ # Expose current messages for forkContext sub-agent spawning
+ from sandbox.thread_context import set_current_messages
+ set_current_messages(messages + [ai_msg])
+
+ # --- Execute tools through middleware chain ---
+ tool_results = await self._execute_tools(tool_calls, response)
+
+ # Yield tools update
+ yield {"tools": {"messages": tool_results}}
+
+ # Advance message history for next turn
+ messages.append(ai_msg)
+ messages.extend(tool_results)
+
+ # Persist message history
+ await self._save_messages(thread_id, messages)
+
+ # -------------------------------------------------------------------------
+ # Model invocation through middleware chain
+ # -------------------------------------------------------------------------
+
+ async def _invoke_model(self, messages: list, config: dict) -> ModelResponse:
+ """Call model through the full middleware chain (awrap_model_call)."""
+
+ async def innermost_handler(request: ModelRequest) -> ModelResponse:
+ """Actual model call — innermost of the chain."""
+ tools = request.tools or []
+ model = request.model
+
+ # Bind tools to model if any
+ if tools:
+ try:
+ bound = model.bind_tools(tools)
+ except Exception:
+ bound = model
+ else:
+ bound = model
+
+ # Build message list: system + conversation
+ call_messages = []
+ if request.system_message:
+ call_messages.append(request.system_message)
+ call_messages.extend(request.messages)
+
+ result = await bound.ainvoke(call_messages)
+ if not isinstance(result, list):
+ result = [result]
+ return ModelResponse(result=result)
+
+ # Build ModelRequest
+ inline_schemas = self._registry.get_inline_schemas()
+ request = ModelRequest(
+ model=self.model,
+ messages=messages,
+ system_message=self.system_prompt,
+ tools=inline_schemas,
+ )
+
+ # Walk middleware chain outside-in: each wraps the next.
+ # Only include middleware that actually overrides awrap_model_call OR wrap_model_call
+ # (not just inherits the base-class NotImplementedError stub).
+ handler = innermost_handler
+ for mw in reversed(self.middleware):
+ if _mw_overrides_model_call(mw):
+ handler = _make_model_wrapper(mw, handler)
+
+ return await handler(request)
+
+ # -------------------------------------------------------------------------
+ # Tool execution through middleware chain
+ # -------------------------------------------------------------------------
+
+ async def _execute_tools(self, tool_calls: list, model_response: ModelResponse) -> list[ToolMessage]:
+ """Execute tool calls respecting concurrency safety, via middleware chain."""
+
+ async def _exec_one(tool_call: dict) -> ToolMessage:
+ name = tool_call.get("name") or tool_call.get("function", {}).get("name", "")
+ call_id = tool_call.get("id", "")
+ args = tool_call.get("args", {}) or tool_call.get("function", {}).get("arguments", {})
+
+ # Normalise args: might be JSON string
+ if isinstance(args, str):
+ import json
+ try:
+ args = json.loads(args)
+ except Exception:
+ args = {}
+
+ normalized_call = {"name": name, "args": args, "id": call_id}
+ tc_request = ToolCallRequest(
+ tool_call=normalized_call,
+ tool=None,
+ state={},
+ runtime=None, # type: ignore[arg-type]
+ )
+
+ async def innermost_tool_handler(req: ToolCallRequest) -> ToolMessage:
+ # Fallback direct dispatch: ToolRunner middleware handles this in
+ # production, but without ToolRunner we dispatch from registry directly.
+ tc = req.tool_call
+ t_name = tc.get("name", "")
+ t_id = tc.get("id", "")
+ t_args = tc.get("args", {})
+ entry = self._registry.get(t_name)
+ if entry is None:
+ return ToolMessage(
+ content=f"Tool '{t_name}' not found",
+ tool_call_id=t_id,
+ name=t_name,
+ )
+ try:
+ import asyncio as _asyncio
+ if _asyncio.iscoroutinefunction(entry.handler):
+ result = await entry.handler(**t_args)
+ else:
+ result = await _asyncio.to_thread(entry.handler, **t_args)
+ return ToolMessage(content=str(result), tool_call_id=t_id, name=t_name)
+ except Exception as e:
+ return ToolMessage(
+ content=f"{e}",
+ tool_call_id=t_id,
+ name=t_name,
+ )
+
+ # Build tool handler chain (outside-in).
+ # Only include middleware that actually overrides awrap_tool_call.
+ tool_handler = innermost_tool_handler
+ for mw in reversed(self.middleware):
+ if _mw_overrides_tool_call(mw):
+ tool_handler = _make_tool_wrapper(mw, tool_handler)
+
+ return await tool_handler(tc_request)
+
+ # Partition tool calls by concurrency safety
+ safe_calls: list[dict] = []
+ unsafe_calls: list[dict] = []
+ for tc in tool_calls:
+ name = tc.get("name") or tc.get("function", {}).get("name", "")
+ entry = self._registry.get(name)
+ if entry and entry.is_concurrency_safe:
+ safe_calls.append(tc)
+ else:
+ unsafe_calls.append(tc)
+
+ results: dict[int, ToolMessage] = {}
+
+ # Execute safe (read-only) tools concurrently
+ if safe_calls:
+ safe_indices = [i for i, tc in enumerate(tool_calls) if tc in safe_calls]
+ safe_results = await asyncio.gather(*[_exec_one(tc) for tc in safe_calls], return_exceptions=True)
+ for idx, res in zip(safe_indices, safe_results):
+ if isinstance(res, Exception):
+ tc = tool_calls[idx]
+ results[idx] = ToolMessage(
+ content=f"{res}",
+ tool_call_id=tc.get("id", ""),
+ name=tc.get("name", ""),
+ )
+ else:
+ results[idx] = res
+
+ # Execute unsafe tools serially
+ for i, tc in enumerate(tool_calls):
+ if tc in unsafe_calls:
+ try:
+ results[i] = await _exec_one(tc)
+ except Exception as e:
+ results[i] = ToolMessage(
+ content=f"{e}",
+ tool_call_id=tc.get("id", ""),
+ name=tc.get("name", ""),
+ )
+
+ # Return results in original order
+ return [results[i] for i in range(len(tool_calls))]
+
+ # -------------------------------------------------------------------------
+ # Checkpointer persistence
+ # -------------------------------------------------------------------------
+
+ async def _load_messages(self, thread_id: str) -> list:
+ """Load message history from checkpointer (if available)."""
+ if self.checkpointer is None:
+ return []
+ try:
+ cfg = {"configurable": {"thread_id": thread_id}}
+ checkpoint = await self.checkpointer.aget(cfg)
+ if checkpoint is None:
+ return []
+ return list(checkpoint.get("channel_values", {}).get("messages", []))
+ except Exception:
+ logger.debug("QueryLoop: could not load checkpoint for thread %s", thread_id)
+ return []
+
+ async def _save_messages(self, thread_id: str, messages: list) -> None:
+ """Persist message history to checkpointer."""
+ if self.checkpointer is None:
+ return
+ try:
+ from langgraph.checkpoint.base import Checkpoint, CheckpointMetadata
+
+ cfg = {"configurable": {"thread_id": thread_id}}
+ existing = await self.checkpointer.aget(cfg)
+ checkpoint_id = existing["id"] if existing else "1"
+
+ checkpoint: Checkpoint = {
+ "v": 1,
+ "id": checkpoint_id,
+ "ts": "",
+ "channel_values": {"messages": messages},
+ "channel_versions": {},
+ "versions_seen": {},
+ "pending_sends": [],
+ }
+ metadata: CheckpointMetadata = {
+ "source": "loop",
+ "step": len(messages),
+ "writes": {},
+ "parents": {},
+ }
+ await self.checkpointer.aput(cfg, checkpoint, metadata, {})
+ except Exception:
+ logger.debug("QueryLoop: could not save checkpoint for thread %s", thread_id, exc_info=True)
+
+ # -------------------------------------------------------------------------
+ # Input parsing
+ # -------------------------------------------------------------------------
+
+ @staticmethod
+ def _parse_input(input: dict) -> list:
+ """Convert input dict to list of LangChain message objects."""
+ raw_messages = input.get("messages", [])
+ result = []
+ for msg in raw_messages:
+ if hasattr(msg, "content"):
+ result.append(msg)
+ elif isinstance(msg, dict):
+ role = msg.get("role", "user")
+ content = msg.get("content", "")
+ if role == "user":
+ result.append(HumanMessage(content=content))
+ elif role == "assistant":
+ result.append(AIMessage(content=content))
+ else:
+ result.append(HumanMessage(content=content))
+ return result
+
+
+# -------------------------------------------------------------------------
+# Closure helpers (avoid late-binding bugs in loop-built lambdas)
+# -------------------------------------------------------------------------
+
+def _make_model_wrapper(mw: AgentMiddleware, next_handler):
+ """Build an awrap_model_call wrapper that correctly closes over mw and next_handler."""
+ async def wrapper(request: ModelRequest) -> ModelResponse:
+ return await mw.awrap_model_call(request, next_handler)
+ return wrapper
+
+
+def _make_tool_wrapper(mw: AgentMiddleware, next_handler):
+ """Build an awrap_tool_call wrapper that correctly closes over mw and next_handler."""
+ async def wrapper(request: ToolCallRequest) -> ToolMessage:
+ return await mw.awrap_tool_call(request, next_handler)
+ return wrapper
+
+
+# -------------------------------------------------------------------------
+# Middleware override detection helpers
+# -------------------------------------------------------------------------
+
+from langchain.agents.middleware.types import AgentMiddleware as _BaseMiddleware
+
+
+def _mw_overrides_model_call(mw: AgentMiddleware) -> bool:
+ """True if mw actually overrides awrap_model_call (not just inherits the base stub)."""
+ # Check if awrap_model_call is overridden in the concrete class
+ mw_type = type(mw)
+    # Presence in the concrete class __dict__ is the override signal; no base-class lookup needed.
+ own_fn = mw_type.__dict__.get("awrap_model_call")
+ if own_fn is not None:
+ return True
+ # Fall back: check if wrap_model_call is overridden (sync version is acceptable)
+    # Same rationale for the sync variant: only the subclass __dict__ entry matters.
+ own_sync = mw_type.__dict__.get("wrap_model_call")
+ return own_sync is not None
+
+
+def _mw_overrides_tool_call(mw: AgentMiddleware) -> bool:
+ """True if mw actually overrides awrap_tool_call (not just inherits the base stub)."""
+ mw_type = type(mw)
+ own_fn = mw_type.__dict__.get("awrap_tool_call")
+ if own_fn is not None:
+ return True
+ own_sync = mw_type.__dict__.get("wrap_tool_call")
+ return own_sync is not None
diff --git a/core/runtime/middleware/memory/compactor.py b/core/runtime/middleware/memory/compactor.py
index 67599b534..defbb7221 100644
--- a/core/runtime/middleware/memory/compactor.py
+++ b/core/runtime/middleware/memory/compactor.py
@@ -10,13 +10,22 @@
from langchain_core.messages import HumanMessage, SystemMessage
+# CC L4b Legacy Compact: system prompt is simple (~200 tokens) — NOT inherited from parent.
+# Using a distinct simple system prompt prevents reusing the parent conversation's cache
+# (different system prompt → different prefix hash), and reduces input token cost.
+COMPACT_SYSTEM_PROMPT = "You are a helpful AI assistant tasked with summarizing conversations."
+
SUMMARY_PROMPT = """\
-Provide a detailed summary for continuing our conversation. Include:
-1. Key decisions made and their rationale
-2. Files created, modified, or read and their current state
-3. Errors encountered and how they were resolved
-4. Outstanding tasks and current progress
-5. Important context that would be needed to continue the work
+Summarize this conversation in the following 9 sections:
+1. Request/Intent — what the user asked for
+2. Technical Concepts — key technologies and approaches discussed
+3. Files/Code — files created or modified and their current state
+4. Errors — errors encountered and how they were resolved
+5. Problem Solving — decisions made and rationale
+6. User Messages — key user inputs and feedback
+7. Pending Tasks — unfinished work
+8. Current Work — what was actively being done at the end
+9. Next Step — the immediate next action needed
Be concise but retain all information needed to continue seamlessly."""
SPLIT_TURN_PREFIX_PROMPT = """\
@@ -80,19 +89,41 @@ def split_messages(self, messages: list[Any]) -> tuple[list[Any], list[Any]]:
return messages[:split_idx], messages[split_idx:]
- async def compact(self, messages_to_summarize: list[Any], model: Any) -> str:
+ async def compact(
+ self,
+ messages_to_summarize: list[Any],
+ model: Any,
+ compact_boundary: int = 0,
+ ) -> str:
"""Generate a summary of the given messages using the LLM.
+ Aligned with CC L4b Legacy Compact:
+ - Uses COMPACT_SYSTEM_PROMPT (simple, ~200 tokens — NOT parent system prompt)
+ - No tools passed (extended thinking disabled, tools=[])
+ - Slices from compact_boundary forward
+ - max_tokens capped at 20000 (CC max summary output)
+
Returns plain text summary string.
"""
- # Build the summarization request
+ # Slice from compact_boundary forward (CC: from last compact_boundary marker)
+ if compact_boundary > 0 and compact_boundary < len(messages_to_summarize):
+ messages_to_summarize = messages_to_summarize[compact_boundary:]
+
formatted = self._format_messages_for_summary(messages_to_summarize)
+ # CC L4b: system prompt is simple — does NOT inherit parent's system prompt.
+ # No tools, no extended thinking.
summary_messages = [
- SystemMessage(content=SUMMARY_PROMPT),
- HumanMessage(content=f"Here is the conversation to summarize:\n\n{formatted}"),
+ SystemMessage(content=COMPACT_SYSTEM_PROMPT),
+ HumanMessage(content=f"Summarize this conversation:\n\n{formatted}\n\n{SUMMARY_PROMPT}"),
]
- response = await model.ainvoke(summary_messages)
+ # Bind max_tokens=20000 (CC max summary output), no tools
+ try:
+ bound_model = model.bind(max_tokens=20000)
+ except Exception:
+ bound_model = model
+
+ response = await bound_model.ainvoke(summary_messages)
return response.content if hasattr(response, "content") else str(response)
def _estimate_msg_tokens(self, msg: Any) -> int:
diff --git a/core/runtime/middleware/prompt_caching/__init__.py b/core/runtime/middleware/prompt_caching/__init__.py
index 87f4e92b4..f77faded0 100644
--- a/core/runtime/middleware/prompt_caching/__init__.py
+++ b/core/runtime/middleware/prompt_caching/__init__.py
@@ -10,6 +10,7 @@
from warnings import warn
from langchain_anthropic.chat_models import ChatAnthropic
+from langchain_core.messages import SystemMessage
try:
from langchain.agents.middleware.types import (
@@ -68,6 +69,26 @@ def __init__(
self.min_messages_to_cache = min_messages_to_cache
self.unsupported_model_behavior = unsupported_model_behavior
+ def _apply_system_cache(self, request: ModelRequest) -> ModelRequest:
+ """Add cache_control to the first (static) block of system_message.
+
+ Anthropic prompt caching requires cache_control on the system content
+ blocks, not on messages. Marking the first block caches the entire
+ static system prefix (identity + tool rules) across sessions.
+ """
+ sm = request.system_message
+ if sm is None:
+ return request
+ content = sm.content
+ if isinstance(content, str):
+            new_content: list = [{"type": "text", "text": content, "cache_control": {"type": self.type, "ttl": self.ttl}}]
+ elif isinstance(content, list) and content:
+            first = {**content[0], "cache_control": {"type": self.type, "ttl": self.ttl}} if isinstance(content[0], dict) else {"type": "text", "text": content[0], "cache_control": {"type": self.type, "ttl": self.ttl}}
+ new_content = [first, *content[1:]]
+ else:
+ return request
+ return request.override(system_message=SystemMessage(content=new_content))
+
def _should_apply_caching(self, request: ModelRequest) -> bool:
"""Check if caching should be applied to the request.
@@ -112,12 +133,7 @@ def wrap_model_call(
"""
if not self._should_apply_caching(request):
return handler(request)
-
- new_model_settings = {
- **request.model_settings,
- "cache_control": {"type": self.type, "ttl": self.ttl},
- }
- return handler(request.override(model_settings=new_model_settings))
+ return handler(self._apply_system_cache(request))
async def awrap_model_call(
self,
@@ -135,12 +151,7 @@ async def awrap_model_call(
"""
if not self._should_apply_caching(request):
return await handler(request)
-
- new_model_settings = {
- **request.model_settings,
- "cache_control": {"type": self.type, "ttl": self.ttl},
- }
- return await handler(request.override(model_settings=new_model_settings))
+ return await handler(self._apply_system_cache(request))
__all__ = ["PromptCachingMiddleware"]
diff --git a/core/runtime/prompts.py b/core/runtime/prompts.py
new file mode 100644
index 000000000..3e790be4e
--- /dev/null
+++ b/core/runtime/prompts.py
@@ -0,0 +1,117 @@
+"""System prompt builders — pure functions, no agent state.
+
+Extracted from LeonAgent so agent.py stays lean.
+
+Middleware Stack
+- MemoryMiddleware: trims/compacts conversation context before model calls.
+- MonitorMiddleware: aggregates runtime metrics and observes model execution.
+- PromptCachingMiddleware: enables Anthropic prompt caching for eligible requests.
+- SteeringMiddleware: drains queued messages and injects them before the next model call.
+- SpillBufferMiddleware: spills oversized tool outputs to disk and replaces them with previews.
+"""
+
+from __future__ import annotations
+
+
def build_context_section(
    *,
    sandbox_name: str,
    sandbox_env_label: str = "",
    sandbox_working_dir: str = "",
    workspace_root: str = "",
    os_name: str = "",
    shell_name: str = "",
) -> str:
    """Render the **Context** bullet list for the system prompt.

    A ``sandbox_name`` of ``"local"`` reports workspace/OS/shell; any other
    value is treated as an isolated environment, with Docker-specific wording
    for ``"docker"`` and cloud wording otherwise.
    """
    if sandbox_name == "local":
        local_lines = [
            f"- Workspace: `{workspace_root}`",
            f"- OS: {os_name}",
            f"- Shell: {shell_name}",
            "- Mode: Local",
        ]
        return "\n".join(local_lines)

    if sandbox_name == "docker":
        mode_label = "Sandbox (isolated local container)"
    else:
        mode_label = "Sandbox (isolated cloud environment)"
    sandbox_lines = [
        f"- Environment: {sandbox_env_label}",
        f"- Working Directory: {sandbox_working_dir}",
        f"- Mode: {mode_label}",
    ]
    return "\n".join(sandbox_lines)
+
+
def build_rules_section(
    *,
    is_sandbox: bool,
    sandbox_name: str = "",
    working_dir: str,
    workspace_root: str,
) -> str:
    """Render the numbered **Important Rules** list for the system prompt.

    Rules 1 and 3 depend on whether the agent runs locally or in a sandbox
    (and, for sandboxes, whether it is Docker or a cloud environment);
    rules 2 and 4-7 are identical everywhere. Rules are joined with blank lines.
    """
    # Environment-dependent rules (1: location, 3: security posture).
    if is_sandbox:
        if sandbox_name == "docker":
            location_rule = "All file and command operations run in a local Docker container, NOT on the user's host filesystem."
        else:
            location_rule = "All file and command operations run in a remote sandbox, NOT on the user's local machine."
        rule_environment = f"1. **Sandbox Environment**: {location_rule} The sandbox is an isolated Linux environment."
        rule_security = "3. **Security**: The sandbox is isolated. You can install packages, run any commands, and modify files freely."
    else:
        rule_environment = "1. **Workspace**: File operations are restricted to: " + workspace_root
        rule_security = "3. **Security**: Dangerous commands are blocked. All operations are logged."

    rules = [
        rule_environment,
        # Rule 2: absolute paths, with a correct/wrong example pair.
        f"""2. **Absolute Paths**: All file paths must be absolute paths.
    - ✅ Correct: `{working_dir}/project/test.py`
    - ❌ Wrong: `test.py` or `./test.py`""",
        rule_security,
        # Rule 4: prefer built-ins over same-purpose MCP tools.
        """4. **Tool Priority**: When a built-in tool and an MCP tool (`mcp__*`) have the same functionality, use the built-in tool.""",
        # Rule 5: dedicated tools beat their shell equivalents.
        """5. **Use Dedicated Tools Instead of Shell Commands**: Do NOT use `Bash` for tasks that have dedicated tools:
    - File search → use `Grep` (NOT `rg`, `grep`, or `find` via Bash)
    - File listing → use `Glob` (NOT `find` or `ls` via Bash)
    - File reading → use `Read` (NOT `cat`, `head`, `tail` via Bash)
    - File editing → use `Edit` (NOT `sed` or `awk` via Bash)
    - Reserve `Bash` for: git, package managers, build tools, tests, and other system operations.""",
        # Rule 6: background tasks need a human-readable description.
        """6. **Background Task Description**: When using `Bash` or `Agent` with `run_in_background: true`, always include a clear `description` parameter.
    - The description is shown to the user in the background task indicator.
    - Keep it concise (5–10 words), action-oriented, e.g. "Run test suite", "Analyze API codebase".
    - Without a description, the raw command or agent name is shown, which is hard to read.""",
        # Rule 7: deferred tools are reachable through tool_search.
        "7. **Deferred Tools**: Some tools are available but not shown by default. Use `tool_search` to discover them by name or keyword.",
    ]
    return "\n\n".join(rules)
+
+
def build_base_prompt(context: str, rules: str) -> str:
    """Assemble the top-level system prompt from pre-rendered sections.

    ``context`` and ``rules`` are the outputs of build_context_section and
    build_rules_section; the result ends with a trailing newline.
    """
    parts = [
        "You are a highly capable AI assistant with access to file and system tools.",
        "",
        "**Context:**",
        context,
        "",
        "**Important Rules:**",
        "",
        rules,
        "",
    ]
    return "\n".join(parts)
+
+
# Static prompt section describing the sub-agent types offered by the
# `Agent` tool; returned verbatim by build_common_sections.
_AGENT_TOOL_SECTION = """
**Sub-agent Types:**
- `explore`: Read-only codebase exploration (Grep, Glob, Read only)
- `plan`: Architecture design and planning (read-only tools)
- `bash`: Shell command execution (Bash + read tools)
- `general`: Full tool access for independent multi-step tasks
"""


def build_common_sections(skills_enabled: bool) -> str:
    """Return the prompt sections shared by all agent configurations.

    NOTE(review): ``skills_enabled`` is currently ignored — presumably a
    placeholder for a future skills section; confirm before relying on it.
    """
    return _AGENT_TOOL_SECTION
diff --git a/core/runtime/registry.py b/core/runtime/registry.py
index f6a87f008..9345b0783 100644
--- a/core/runtime/registry.py
+++ b/core/runtime/registry.py
@@ -20,11 +20,28 @@ class ToolEntry:
schema: SchemaProvider
handler: Handler
source: str
+ search_hint: str = "" # 3-10 word capability description for ToolSearch matching
+ is_concurrency_safe: bool = False # fail-closed: assume not safe
+ is_read_only: bool = False # fail-closed: assume write operation
+ context_schema: dict | None = None # fields this tool needs from ToolUseContext
def get_schema(self) -> dict:
return self.schema() if callable(self.schema) else self.schema
# Fail-closed safety defaults applied by build_tool when the caller omits
# these flags: assume the tool writes and is not concurrency-safe.
TOOL_DEFAULTS: dict[str, object] = {
    "is_concurrency_safe": False,
    "is_read_only": False,
    "context_schema": None,
}


def build_tool(**kwargs: object) -> ToolEntry:
    """Factory that fills in safety defaults. Fail-closed: assumes write + non-concurrent."""
    # NOTE(review): these defaults duplicate ToolEntry's own field defaults,
    # and the type: ignore also hides missing *required* fields (name/schema/
    # handler/...) until runtime — confirm both are intended.
    merged = {**TOOL_DEFAULTS, **kwargs}
    return ToolEntry(**merged)  # type: ignore[arg-type]
+
+
class ToolRegistry:
"""Central registry for all tools.
@@ -59,19 +76,47 @@ def get_inline_schemas(self) -> list[dict]:
return [e.get_schema() for e in self._tools.values() if e.mode == ToolMode.INLINE]
def search(self, query: str) -> list[ToolEntry]:
- """Return all matching tools (including inline) for tool_search."""
- q = query.lower()
- results = []
+ """Return matching tools with ranked relevance.
+
+ Supports ``select:Name1,Name2`` for exact selection.
+ Otherwise ranks by: search_hint > name > description.
+ """
+ q = query.strip()
+
+ # --- select: exact lookup ---
+ if q.lower().startswith("select:"):
+ names = [n.strip() for n in q[len("select:"):].split(",") if n.strip()]
+ results = [self._tools[n] for n in names if n in self._tools]
+ return results
+
+ # --- keyword search with ranking ---
+ keywords = q.lower().split()
+ if not keywords:
+ return list(self._tools.values())
+
+ scored: list[tuple[int, ToolEntry]] = []
for entry in self._tools.values():
schema = entry.get_schema()
- name = schema.get("name", "")
- desc = schema.get("description", "")
- if q in name.lower() or q in desc.lower():
- results.append(entry)
- # If no match, return all
- if not results:
- results = list(self._tools.values())
- return results
+ name_lower = entry.name.lower()
+ hint_lower = entry.search_hint.lower()
+ desc_lower = schema.get("description", "").lower()
+
+ score = 0
+ for kw in keywords:
+ if kw in hint_lower:
+ score += 3
+ if kw in name_lower:
+ score += 2
+ if kw in desc_lower:
+ score += 1
+ if score > 0:
+ scored.append((score, entry))
+
+ if not scored:
+ return list(self._tools.values())
+
+ scored.sort(key=lambda x: x[0], reverse=True)
+ return [entry for _, entry in scored]
def list_all(self) -> list[ToolEntry]:
return list(self._tools.values())
diff --git a/core/runtime/state.py b/core/runtime/state.py
new file mode 100644
index 000000000..f2b6d0b39
--- /dev/null
+++ b/core/runtime/state.py
@@ -0,0 +1,90 @@
+"""Three-layer state models aligned with CC architecture.
+
+Layer 1: BootstrapConfig — survives /clear, process-level constants
+Layer 2: AppState — per-session mutable state (Zustand-style store)
+Layer 3: ToolUseContext — per-turn, holds live closures to AppState
+"""
+
+from __future__ import annotations
+
+import uuid
+from pathlib import Path
+from typing import Any, Callable
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
class BootstrapConfig(BaseModel):
    """Process-level configuration that survives /clear.

    Analogous to CC Bootstrap State (~85 fields). Contains workspace
    identity, model config, security flags, and API credentials.
    """

    # Workspace identity and model selection.
    workspace_root: Path
    model_name: str
    api_key: str | None = None

    # Security flags (fail-closed defaults)
    block_dangerous_commands: bool = True
    # NOTE(review): network commands are permitted by default, which is
    # fail-open despite the group comment above — confirm this is intended.
    block_network_commands: bool = False
    enable_audit_log: bool = True
    enable_web_tools: bool = False

    # File access
    allowed_file_extensions: list[str] | None = None
    extra_allowed_paths: list[str] | None = None

    # Turn limits
    max_turns: int | None = None

    # Session identity — a fresh random hex id per instance unless supplied.
    session_id: str = Field(default_factory=lambda: uuid.uuid4().hex)
    parent_session_id: str | None = None

    # Model settings
    model_provider: str | None = None
    base_url: str | None = None
    context_limit: int | None = None

    # Allow non-pydantic-native annotation types on this model.
    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+
class AppState(BaseModel):
    """Per-session mutable state. Analogous to CC AppState store.

    Implements a minimal Zustand-style store with getState/setState.
    Not reactive — no subscriptions needed for Python backend.
    """

    messages: list = Field(default_factory=list)
    turn_count: int = 0
    total_cost: float = 0.0
    compact_boundary_index: int = 0
    # Map of tool_name -> is_enabled (runtime overrides)
    tool_overrides: dict[str, bool] = Field(default_factory=dict)

    def get_state(self) -> "AppState":
        """Return the live state object itself (no copy is made)."""
        return self

    def set_state(self, updater: Callable[["AppState"], "AppState"]) -> "AppState":
        """Apply *updater* and copy each field of its result back onto self.

        The updater may return a brand-new AppState or mutate-and-return this
        instance; either way ``self`` ends up carrying every field of the
        result and is returned for chaining.
        """
        updated = updater(self)
        # Mutate in place (Python idiom — no immutable constraint needed here)
        for field_name in AppState.model_fields:
            setattr(self, field_name, getattr(updated, field_name))
        return self
+
+
class ToolUseContext(BaseModel):
    """Per-turn context bag. Analogous to CC ToolUseContext.

    Carries live closures to AppState so tools can read/mutate session state.
    Sub-agents receive a NO-OP set_app_state to prevent write-through.
    """

    bootstrap: BootstrapConfig
    # Typed as Any — presumably to sidestep pydantic validation of callables;
    # excluded from serialization either way. TODO confirm.
    get_app_state: Any = Field(exclude=True)  # Callable[[], AppState]
    set_app_state: Any = Field(exclude=True)  # Callable[[AppState], None] | NO-OP
    # Short random id to distinguish turns (e.g. in logs).
    turn_id: str = Field(default_factory=lambda: uuid.uuid4().hex[:8])

    model_config = ConfigDict(arbitrary_types_allowed=True)
diff --git a/core/tools/command/service.py b/core/tools/command/service.py
index 475289b9c..1b9459d64 100644
--- a/core/tools/command/service.py
+++ b/core/tools/command/service.py
@@ -63,7 +63,11 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "Bash",
- "description": ("Execute shell command. OS auto-detects shell (mac->zsh, linux->bash, win->powershell)."),
+ "description": (
+ "Execute shell command (zsh on macOS, bash on Linux, PowerShell on Windows). "
+ "Default timeout 120s (max 600s). Dangerous commands are blocked. "
+ "Prefer dedicated tools over Bash: Read over cat, Grep over grep/rg, Glob over find/ls, Edit over sed/awk."
+ ),
"parameters": {
"type": "object",
"properties": {
diff --git a/core/tools/filesystem/service.py b/core/tools/filesystem/service.py
index a8cf1c9c6..0eadc7516 100644
--- a/core/tools/filesystem/service.py
+++ b/core/tools/filesystem/service.py
@@ -69,7 +69,12 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "Read",
- "description": ("Read file content (text/code/images/PDF/PPTX/Notebook). Path must be absolute."),
+ "description": (
+ "Read file content. Output uses cat -n format (line numbers starting at 1). "
+ "Default reads up to 2000 lines from start; use offset/limit for long files. "
+ "Supports images (PNG/JPG), PDF (use pages param for large PDFs), and Jupyter notebooks. "
+ "Path must be absolute."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -85,12 +90,19 @@ def _register(self, registry: ToolRegistry) -> None:
"type": "integer",
"description": "Number of lines to read (optional)",
},
+ "pages": {
+ "type": "string",
+ "description": "Page range for PDF files (e.g. '1-5'). Max 20 pages per request.",
+ },
},
"required": ["file_path"],
},
},
handler=self._read_file,
source="FileSystemService",
+ search_hint="read view file content text code image PDF notebook",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
@@ -100,7 +112,10 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "Write",
- "description": "Create new file. Path must be absolute. Fails if file exists.",
+ "description": (
+ "Create or overwrite a file with full content. Forces LF line endings. "
+ "Fails if file already exists — use Edit for modifications. Path must be absolute."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -118,6 +133,7 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._write_file,
source="FileSystemService",
+ search_hint="create new file write content to disk",
)
)
@@ -128,10 +144,9 @@ def _register(self, registry: ToolRegistry) -> None:
schema={
"name": "Edit",
"description": (
- "Edit existing file using exact string replacement. "
- "MUST read file before editing. "
- "old_string must be unique in file. "
- "Set replace_all=true to replace all occurrences."
+ "Edit file via exact string replacement. You MUST Read the file first. "
+ "old_string must match exactly one location (or use replace_all=true). "
+ "Does not support .ipynb files (use Write to overwrite full JSON). Path must be absolute."
),
"parameters": {
"type": "object",
@@ -158,6 +173,7 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._edit_file,
source="FileSystemService",
+ search_hint="edit modify replace string in existing file",
)
)
@@ -167,7 +183,7 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "list_dir",
- "description": "List directory contents. Path must be absolute.",
+ "description": "List directory contents (files and subdirectories, non-recursive). Path must be absolute.",
"parameters": {
"type": "object",
"properties": {
@@ -181,6 +197,9 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._list_dir,
source="FileSystemService",
+ search_hint="list directory contents browse folder",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
diff --git a/core/tools/lsp/__init__.py b/core/tools/lsp/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/core/tools/lsp/service.py b/core/tools/lsp/service.py
new file mode 100644
index 000000000..fe6dc79a6
--- /dev/null
+++ b/core/tools/lsp/service.py
@@ -0,0 +1,811 @@
+"""LSP Service - Language Server Protocol code intelligence via multilspy.
+
+Registers a single DEFERRED `LSP` tool with 9 operations:
+ goToDefinition, findReferences, hover, documentSymbol, workspaceSymbol,
+ goToImplementation, prepareCallHierarchy, incomingCalls, outgoingCalls
+
+Sessions are managed by the process-level _LSPSessionPool singleton — they
+start lazily on first use and persist for the lifetime of the process,
+surviving agent restarts. Call `await lsp_pool.close_all()` on process exit.
+
+Supported languages (via multilspy):
+ python, typescript, javascript, go, rust, java, ruby, kotlin, csharp
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+import shutil
+import subprocess
+from pathlib import Path
+from typing import Any
+
+_FILE_SIZE_LIMIT = 10 * 1024 * 1024 # 10 MB — matches CC LSP limit
+
+from core.runtime.registry import ToolEntry, ToolMode, ToolRegistry
+
+logger = logging.getLogger(__name__)
+
# Tool schema for the single deferred `LSP` tool. The supported-language
# sentence now includes csharp, matching the module docstring and the `.cs`
# entry in _EXT_TO_LANG (it was missing from the original description).
LSP_SCHEMA = {
    "name": "LSP",
    "description": (
        "Language Server Protocol code intelligence. "
        "Operations: goToDefinition, findReferences, hover, documentSymbol, workspaceSymbol, "
        "goToImplementation, prepareCallHierarchy, incomingCalls, outgoingCalls. "
        "Language servers are auto-downloaded on first use. "
        "Supports python, typescript, javascript, go, rust, java, ruby, kotlin, csharp. "
        "file_path must be absolute. line/column are zero-based. "
        "incomingCalls/outgoingCalls require 'item' from prepareCallHierarchy output."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "operation": {
                "type": "string",
                "enum": [
                    "goToDefinition", "findReferences", "hover", "documentSymbol", "workspaceSymbol",
                    "goToImplementation", "prepareCallHierarchy", "incomingCalls", "outgoingCalls",
                ],
                "description": "LSP operation to perform",
            },
            "file_path": {
                "type": "string",
                "description": "Absolute path to file (required for all operations except workspaceSymbol)",
            },
            "line": {
                "type": "integer",
                "description": "Zero-based line number (required for goToDefinition, findReferences, hover)",
            },
            "column": {
                "type": "integer",
                "description": "Zero-based column number (required for goToDefinition, findReferences, hover)",
            },
            "query": {
                "type": "string",
                "description": "Symbol name to search (required for workspaceSymbol)",
            },
            "language": {
                "type": "string",
                "description": "Language override. Auto-detected from file extension if omitted.",
            },
            "item": {
                "type": "object",
                "description": "CallHierarchyItem from prepareCallHierarchy (required for incomingCalls/outgoingCalls).",
            },
        },
        "required": ["operation"],
    },
}
+
+# File extension → multilspy language identifier
+_EXT_TO_LANG: dict[str, str] = {
+ ".py": "python",
+ ".ts": "typescript",
+ ".tsx": "typescript",
+ ".js": "javascript",
+ ".jsx": "javascript",
+ ".go": "go",
+ ".rs": "rust",
+ ".java": "java",
+ ".rb": "ruby",
+ ".kt": "kotlin",
+ ".cs": "csharp",
+}
+
+
def _find_pyright() -> str | None:
    """Locate the pyright-langserver executable.

    Preference order: the directory of the running interpreter (so a
    venv-local install wins), then the system PATH.

    Returns:
        Absolute path to the executable, or None when pyright is not installed.
    """
    import sys  # local import: only needed for this lookup

    for name in ("pyright-langserver", "pyright_langserver"):
        # Prefer the binary sitting next to the current interpreter — this is
        # more robust than deriving the venv root from os.__file__'s parents.
        # (On Windows the .exe suffix is handled by the shutil.which fallback.)
        candidate = Path(sys.executable).parent / name
        if candidate.exists():
            return str(candidate)
        found = shutil.which(name)
        if found:
            return found
    return None
+
+
class _PyrightSession:
    """Minimal asyncio LSP client for pyright-langserver (stdio).

    Used for Python operations not supported by Jedi:
    goToImplementation, prepareCallHierarchy, incomingCalls, outgoingCalls.

    Requires pyright in the active venv: pip install pyright
    """

    def __init__(self, workspace_root: str) -> None:
        self._workspace_root = workspace_root
        self._proc: asyncio.subprocess.Process | None = None
        # request id → Future resolved by _read_loop with the matching response
        self._pending: dict[int, asyncio.Future] = {}
        self._next_id = 1
        self._reader_task: asyncio.Task | None = None
        # URIs already announced via textDocument/didOpen
        self._open_files: set[str] = set()

    async def start(self) -> None:
        """Spawn pyright-langserver and run the LSP initialize handshake.

        Raises:
            RuntimeError: when the pyright executable cannot be located.
        """
        server = _find_pyright()
        if not server:
            raise RuntimeError(
                "pyright-langserver not found. Install with: pip install pyright"
            )
        self._proc = await asyncio.create_subprocess_exec(
            server, "--stdio",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        self._reader_task = asyncio.create_task(self._read_loop(), name="pyright-reader")

        # LSP handshake
        await self._request("initialize", {
            "processId": os.getpid(),
            "rootUri": Path(self._workspace_root).as_uri(),
            "capabilities": {
                "textDocument": {
                    "synchronization": {"dynamicRegistration": False},
                    "implementation": {"dynamicRegistration": False, "linkSupport": True},
                    "callHierarchy": {"dynamicRegistration": False},
                }
            },
            "initializationOptions": {},
        })
        self._notify("initialized", {})

    # ── I/O ───────────────────────────────────────────────────────────

    async def _read_loop(self) -> None:
        """Read LSP framed messages forever, routing responses to futures."""
        try:
            while True:
                assert self._proc and self._proc.stdout
                # Read headers until blank line
                content_length = 0
                while True:
                    raw = await self._proc.stdout.readline()
                    if not raw:
                        return
                    line = raw.decode().rstrip()
                    if not line:
                        break
                    if line.lower().startswith("content-length:"):
                        content_length = int(line.split(":", 1)[1].strip())
                if content_length == 0:
                    continue
                body = await self._proc.stdout.readexactly(content_length)
                msg = json.loads(body)
                # Route response/error to waiting Future
                msg_id = msg.get("id")
                msg_method = msg.get("method", "")
                if msg_id is not None and msg_method:
                    # Server-to-client request — must acknowledge with a response
                    self._write({"jsonrpc": "2.0", "id": msg_id, "result": None})
                    await self._drain()
                elif msg_id is not None and msg_id in self._pending:
                    fut = self._pending.pop(msg_id)
                    if not fut.done():
                        if "error" in msg:
                            fut.set_exception(RuntimeError(
                                f"{msg['error'].get('message', 'LSP error')} "
                                f"({msg['error'].get('code', '')})"
                            ))
                        else:
                            fut.set_result(msg.get("result"))
                # All other notifications ($/progress, diagnostics, etc.) are silently dropped
        except Exception as exc:
            # Fail every outstanding request so callers don't hang forever.
            for fut in self._pending.values():
                if not fut.done():
                    fut.set_exception(exc)

    def _write(self, msg: dict) -> None:
        """Encode and buffer one LSP message (call drain() to flush)."""
        assert self._proc and self._proc.stdin
        body = json.dumps(msg, separators=(",", ":")).encode()
        header = f"Content-Length: {len(body)}\r\n\r\n".encode()
        self._proc.stdin.write(header + body)

    async def _drain(self) -> None:
        assert self._proc and self._proc.stdin
        await self._proc.stdin.drain()

    def _notify(self, method: str, params: Any) -> None:
        """Send a JSON-RPC notification (no id, no response expected)."""
        self._write({"jsonrpc": "2.0", "method": method, "params": params})

    async def _request(self, method: str, params: Any, timeout: float = 30.0) -> Any:
        """Send a JSON-RPC request and await its response.

        Raises:
            asyncio.TimeoutError: when no response arrives within *timeout*.
        """
        req_id = self._next_id
        self._next_id += 1
        # get_running_loop(): this coroutine always runs inside a loop, and
        # asyncio.get_event_loop() is deprecated for this use since 3.10.
        fut: asyncio.Future = asyncio.get_running_loop().create_future()
        self._pending[req_id] = fut
        self._write({"jsonrpc": "2.0", "id": req_id, "method": method, "params": params})
        await self._drain()
        try:
            return await asyncio.wait_for(fut, timeout=timeout)
        finally:
            # Drop the entry even on timeout/cancellation so _pending cannot
            # accumulate futures for responses that never arrive.
            self._pending.pop(req_id, None)

    # ── file lifecycle ────────────────────────────────────────────────

    def _open_file(self, abs_path: str) -> None:
        """Announce the file to the server via didOpen (idempotent)."""
        uri = Path(abs_path).as_uri()
        if uri in self._open_files:
            return
        try:
            text = Path(abs_path).read_text(encoding="utf-8", errors="replace")
        except OSError:
            text = ""
        self._notify("textDocument/didOpen", {
            "textDocument": {"uri": uri, "languageId": "python", "version": 1, "text": text}
        })
        self._open_files.add(uri)

    def _close_file(self, abs_path: str) -> None:
        """Send didClose for a previously opened file (no-op otherwise)."""
        uri = Path(abs_path).as_uri()
        if uri not in self._open_files:
            return
        self._notify("textDocument/didClose", {"textDocument": {"uri": uri}})
        self._open_files.discard(uri)

    def _abs(self, rel_path: str) -> str:
        """Resolve *rel_path* against the session workspace root."""
        return str(Path(self._workspace_root) / rel_path)

    # ── LSP operations ────────────────────────────────────────────────

    async def request_implementation(self, rel_path: str, line: int, col: int) -> list:
        abs_path = self._abs(rel_path)
        self._open_file(abs_path)
        await self._drain()
        uri = Path(abs_path).as_uri()
        response = await self._request("textDocument/implementation", {
            "textDocument": {"uri": uri},
            "position": {"line": line, "character": col},
        })
        return self._normalise_locations(response)

    async def request_prepare_call_hierarchy(self, rel_path: str, line: int, col: int) -> list:
        abs_path = self._abs(rel_path)
        self._open_file(abs_path)
        await self._drain()
        uri = Path(abs_path).as_uri()
        response = await self._request("textDocument/prepareCallHierarchy", {
            "textDocument": {"uri": uri},
            "position": {"line": line, "character": col},
        })
        # File stays open — callHierarchy/incomingCalls and outgoingCalls may need it
        return response or []

    async def request_incoming_calls(self, item: dict) -> list:
        response = await self._request("callHierarchy/incomingCalls", {"item": item})
        return response or []

    async def request_outgoing_calls(self, item: dict) -> list:
        response = await self._request("callHierarchy/outgoingCalls", {"item": item})
        return response or []

    @staticmethod
    def _normalise_locations(response: Any) -> list:
        """Flatten Location/LocationLink responses into uniform dicts."""
        if not response:
            return []
        if isinstance(response, dict):
            response = [response]
        out = []
        for loc in response:
            uri = loc.get("uri") or loc.get("targetUri", "")
            rng = loc.get("range") or loc.get("targetSelectionRange") or loc.get("targetRange") or {}
            out.append({"uri": uri, "absolutePath": uri.replace("file://", ""), "range": rng})
        return out

    # ── shutdown ──────────────────────────────────────────────────────

    async def stop(self) -> None:
        """Shut the server down: polite LSP shutdown, then terminate/kill."""
        if self._proc:
            try:
                await asyncio.wait_for(self._request("shutdown", {}), timeout=5)
                self._notify("exit", {})
            except Exception:
                pass
            try:
                self._proc.terminate()
                await asyncio.wait_for(self._proc.wait(), timeout=5)
            except Exception:
                try:
                    self._proc.kill()
                except ProcessLookupError:
                    pass  # process already exited — nothing left to kill
        if self._reader_task and not self._reader_task.done():
            self._reader_task.cancel()
            try:
                await self._reader_task
            except (asyncio.CancelledError, Exception):
                # CancelledError is a BaseException on 3.8+, so list both.
                pass
+
+
class _LSPSession:
    """Holds a multilspy LanguageServer alive in a background asyncio task.

    Pattern: start_server() is an async context manager that must stay open
    for the lifetime of the session. We enter it inside a background Task and
    use an Event to signal readiness. Stopping sets a second Event that causes
    the background task to exit the context and shut down the server process.
    """

    def __init__(self, language: str, workspace_root: str) -> None:
        self.language = language
        self._workspace_root = workspace_root
        self._ready = asyncio.Event()
        self._stop = asyncio.Event()
        self._task: asyncio.Task | None = None
        self._lsp: Any = None
        self._error: Exception | None = None

    async def start(self) -> None:
        """Launch the background server task and wait (≤60s) until ready.

        Raises:
            TimeoutError: when the server does not come up within 60s.
            Exception: whatever startup error the background task recorded.
        """
        self._task = asyncio.create_task(self._run(), name=f"lsp-{self.language}")
        try:
            await asyncio.wait_for(asyncio.shield(self._ready.wait()), timeout=60)
        except asyncio.TimeoutError:
            # BUGFIX: the shielded wait left the background task running after
            # a timeout — signal and cancel it so it cannot leak.
            self._stop.set()
            self._task.cancel()
            raise TimeoutError(f"LSP server for '{self.language}' did not start within 60s")
        if self._error:
            raise self._error

    async def _run(self) -> None:
        """Background task: hold the start_server() context open until stopped."""
        try:
            from multilspy import LanguageServer  # core dep — always available
            from multilspy.multilspy_config import MultilspyConfig
            from multilspy.multilspy_logger import MultilspyLogger

            config = MultilspyConfig.from_dict({"code_language": self.language})
            lsp_logger = MultilspyLogger()
            self._lsp = LanguageServer.create(config, lsp_logger, self._workspace_root)
            async with self._lsp.start_server():
                self._ready.set()
                await self._stop.wait()
        except Exception as e:
            self._error = e
            self._ready.set()  # unblock any waiters
            logger.error("[LSPService] %s server error: %s", self.language, e)

    async def stop(self) -> None:
        """Signal the background task to exit and wait briefly for shutdown."""
        self._stop.set()
        if self._task and not self._task.done():
            try:
                await asyncio.wait_for(self._task, timeout=5)
            except (asyncio.TimeoutError, asyncio.CancelledError):
                self._task.cancel()
                try:
                    await self._task
                except asyncio.CancelledError:
                    pass

    # ── request methods ───────────────────────────────────────────────
    # multilspy raises AssertionError on a None server response (meaning
    # "nothing found"), so each wrapper converts that to an empty result.

    async def request_definition(self, rel_path: str, line: int, col: int) -> list:
        try:
            return await self._lsp.request_definition(rel_path, line, col) or []
        except AssertionError:
            return []  # multilspy asserts on None response (no definition found)

    async def request_references(self, rel_path: str, line: int, col: int) -> list:
        try:
            return await self._lsp.request_references(rel_path, line, col) or []
        except AssertionError:
            return []

    async def request_hover(self, rel_path: str, line: int, col: int) -> Any:
        try:
            return await self._lsp.request_hover(rel_path, line, col)
        except AssertionError:
            return None

    async def request_document_symbols(self, rel_path: str) -> list:
        try:
            symbols, _ = await self._lsp.request_document_symbols(rel_path)
            return symbols or []
        except AssertionError:
            return []

    async def request_workspace_symbol(self, query: str) -> list:
        try:
            return await self._lsp.request_workspace_symbol(query) or []
        except AssertionError:
            # BUGFIX: this wrapper was the only one missing the guard — a
            # no-result response would have surfaced as an AssertionError.
            return []

    # ── advanced ops (direct server.send, for servers that support them) ──

    async def request_implementation(self, rel_path: str, line: int, col: int) -> list:
        abs_uri = Path(self._workspace_root, rel_path).as_uri()
        with self._lsp.open_file(rel_path):
            response = await self._lsp.server.send.implementation(
                {"textDocument": {"uri": abs_uri}, "position": {"line": line, "character": col}}
            )
        if not response:
            return []
        if isinstance(response, dict):
            response = [response]
        out = []
        for item in response:
            if "uri" in item and "range" in item:
                item.setdefault("absolutePath", item["uri"].replace("file://", ""))
                out.append(item)
            elif "targetUri" in item:
                out.append({
                    "uri": item["targetUri"],
                    "absolutePath": item["targetUri"].replace("file://", ""),
                    "range": item.get("targetSelectionRange", item.get("targetRange", {})),
                })
        return out

    async def request_prepare_call_hierarchy(self, rel_path: str, line: int, col: int) -> list:
        abs_uri = Path(self._workspace_root, rel_path).as_uri()
        with self._lsp.open_file(rel_path):
            response = await self._lsp.server.send.prepare_call_hierarchy(
                {"textDocument": {"uri": abs_uri}, "position": {"line": line, "character": col}}
            )
        return response or []

    async def request_incoming_calls(self, item: dict) -> list:
        response = await self._lsp.server.send.incoming_calls({"item": item})
        return response or []

    async def request_outgoing_calls(self, item: dict) -> list:
        response = await self._lsp.server.send.outgoing_calls({"item": item})
        return response or []
+ return response or []
+
+
class _LSPSessionPool:
    """Process-level singleton managing LSP sessions across all agent instances.

    Sessions are keyed by (language, workspace_root) and survive agent restarts.
    Call close_all() once at process exit (e.g. from backend lifespan shutdown).
    """

    def __init__(self) -> None:
        # (language, workspace_root) → _LSPSession
        self._sessions: dict[tuple[str, str], _LSPSession] = {}
        # workspace_root → _PyrightSession
        self._pyright: dict[str, _PyrightSession] = {}
        # In-flight start tasks to prevent duplicate starts under concurrent requests
        self._starting: dict[tuple[str, str], asyncio.Task] = {}
        self._starting_pyright: dict[str, asyncio.Task] = {}

    async def get_session(self, language: str, workspace_root: str) -> _LSPSession:
        """Return a ready multilspy session, starting one lazily on first use.

        Concurrent callers share a single in-flight start task. On failure
        the in-flight marker is cleared so later calls can retry.
        """
        key = (language, workspace_root)
        if key in self._sessions:
            return self._sessions[key]
        task = self._starting.get(key)
        if task is None:
            async def _start() -> _LSPSession:
                try:
                    logger.info("[LSPPool] starting %s language server (workspace=%s)...", language, workspace_root)
                    s = _LSPSession(language, workspace_root)
                    await s.start()
                    self._sessions[key] = s
                    logger.info("[LSPPool] %s language server ready", language)
                    return s
                finally:
                    # BUGFIX: the original popped only on success, so a failed
                    # start stayed in _starting and every subsequent call
                    # re-awaited the same failed task forever — no retry possible.
                    self._starting.pop(key, None)
            task = asyncio.create_task(_start(), name=f"lsp-start-{language}")
            self._starting[key] = task
        return await task

    async def get_pyright(self, workspace_root: str) -> _PyrightSession:
        """Return a ready pyright session for the workspace, starting it lazily.

        Same retry-on-failure semantics as get_session.
        """
        if workspace_root in self._pyright:
            return self._pyright[workspace_root]
        task = self._starting_pyright.get(workspace_root)
        if task is None:
            async def _start() -> _PyrightSession:
                try:
                    logger.info("[LSPPool] starting pyright (workspace=%s)...", workspace_root)
                    s = _PyrightSession(workspace_root)
                    await s.start()
                    self._pyright[workspace_root] = s
                    logger.info("[LSPPool] pyright ready")
                    return s
                finally:
                    # BUGFIX: clear the marker on failure too (see get_session).
                    self._starting_pyright.pop(workspace_root, None)
            task = asyncio.create_task(_start(), name="lsp-start-pyright")
            self._starting_pyright[workspace_root] = task
        return await task

    async def close_all(self) -> None:
        """Stop all running language server processes. Call once at process exit."""
        for (lang, ws), session in list(self._sessions.items()):
            try:
                await session.stop()
                logger.debug("[LSPPool] stopped %s server (workspace=%s)", lang, ws)
            except Exception as e:
                logger.debug("[LSPPool] error stopping %s: %s", lang, e)
        self._sessions.clear()
        for ws, session in list(self._pyright.items()):
            try:
                await session.stop()
                logger.debug("[LSPPool] stopped pyright (workspace=%s)", ws)
            except Exception as e:
                logger.debug("[LSPPool] error stopping pyright: %s", e)
        self._pyright.clear()


# Process-level singleton — import and use directly
lsp_pool = _LSPSessionPool()
+
+
+class LSPService:
+ """Registers the LSP tool (DEFERRED) into ToolRegistry.
+
+ Delegates all session management to the process-level lsp_pool singleton.
+ Language servers start lazily on first use and persist across agent restarts.
+ """
+
+ # Operations that Jedi doesn't support — routed to pyright for Python,
+ # or to the native server.send.* for other languages.
+ _ADVANCED_OPS: frozenset[str] = frozenset(
+ {"goToImplementation", "prepareCallHierarchy", "incomingCalls", "outgoingCalls"}
+ )
+
+ def __init__(self, registry: ToolRegistry, workspace_root: str | Path) -> None:
+ self._workspace_root = str(Path(workspace_root).resolve())
+ registry.register(
+ ToolEntry(
+ name="LSP",
+ mode=ToolMode.DEFERRED,
+ schema=LSP_SCHEMA,
+ handler=self._handle,
+ source="LSPService",
+ search_hint="language server definition references hover symbols go-to",
+ is_read_only=True,
+ is_concurrency_safe=True,
+ )
+ )
+ logger.debug("[LSPService] registered (workspace=%s)", self._workspace_root)
+
+ # ── session management (delegates to process-level pool) ──────────
+
+ async def _get_session(self, language: str) -> _LSPSession:
+ return await lsp_pool.get_session(language, self._workspace_root)
+
+ async def _get_pyright(self) -> _PyrightSession:
+ return await lsp_pool.get_pyright(self._workspace_root)
+
+ def _detect_language(self, file_path: str) -> str | None:
+ return _EXT_TO_LANG.get(Path(file_path).suffix.lower())
+
+ def _to_relative(self, file_path: str) -> str:
+ try:
+ return str(Path(file_path).relative_to(self._workspace_root))
+ except ValueError:
+ return file_path # fallback: pass as-is
+
+ # ── pre-flight checks ─────────────────────────────────────────────
+
+ @staticmethod
+ def _check_file(file_path: str) -> str | None:
+ """Return error string if file exceeds 10 MB limit, else None."""
+ try:
+ size = Path(file_path).stat().st_size
+ except OSError:
+ return None # let LSP handle missing file errors
+ if size > _FILE_SIZE_LIMIT:
+ mb = size / (1024 * 1024)
+ return f"File too large ({mb:.1f} MB). LSP file size limit is 10 MB."
+ return None
+
+ def _filter_gitignored(self, locations: list) -> list:
+ """Filter out locations inside gitignored paths (batches of 50, like CC)."""
+ if not locations:
+ return locations
+ abs_paths = [loc.get("absolutePath") or loc.get("uri", "").replace("file://", "") for loc in locations]
+ try:
+ # git check-ignore exits 0 if any path is ignored, 1 if none are
+ result = subprocess.run(
+ ["git", "check-ignore", "--stdin", "-z"],
+ input="\0".join(abs_paths),
+ capture_output=True,
+ text=True,
+ cwd=self._workspace_root,
+ timeout=5,
+ )
+ ignored = set(result.stdout.split("\0")) if result.stdout else set()
+ except Exception:
+ return locations # on error, return all (fail-open)
+ return [loc for loc, p in zip(locations, abs_paths) if p not in ignored]
+
+ def _filter_gitignored_batched(self, locations: list) -> list:
+ """Run _filter_gitignored in batches of 50 (matches CC batch size)."""
+ out = []
+ for i in range(0, len(locations), 50):
+ out.extend(self._filter_gitignored(locations[i:i + 50]))
+ return out
+
+ # ── output formatters ─────────────────────────────────────────────
+
+ @staticmethod
+ def _fmt_location(loc: Any) -> dict:
+ start = loc.get("range", {}).get("start", {})
+ return {
+ "file": loc.get("absolutePath") or loc.get("uri", ""),
+ "line": start.get("line", 0),
+ "column": start.get("character", 0),
+ }
+
+ @staticmethod
+ def _fmt_hover(result: Any) -> str:
+ contents = result.get("contents", "")
+ if isinstance(contents, dict):
+ return contents.get("value", str(contents))
+ if isinstance(contents, list):
+ parts = []
+ for c in contents:
+ parts.append(c.get("value", str(c)) if isinstance(c, dict) else str(c))
+ return "\n".join(parts)
+ return str(contents)
+
+ @staticmethod
+ def _fmt_symbol(sym: Any) -> dict:
+ loc = sym.get("location") or {}
+ if loc:
+ # SymbolInformation (workspaceSymbol) — location.uri + location.range
+ start = loc.get("range", {}).get("start", {})
+ uri = loc.get("uri", "")
+ file = loc.get("absolutePath") or (uri.replace("file://", "") if uri.startswith("file://") else uri)
+ else:
+ # DocumentSymbol (documentSymbol) — range/selectionRange at top level, no file
+ start = sym.get("selectionRange", sym.get("range", {})).get("start", {})
+ file = ""
+ return {
+ "name": sym.get("name", ""),
+ "kind": sym.get("kind"),
+ "file": file,
+ "line": start.get("line"),
+ }
+
+ @staticmethod
+ def _fmt_call_hierarchy_item(item: Any) -> dict:
+ uri = item.get("uri", "")
+ start = item.get("range", {}).get("start", {})
+ return {
+ "name": item.get("name", ""),
+ "kind": item.get("kind"),
+ "file": uri.replace("file://", "") if uri.startswith("file://") else uri,
+ "line": start.get("line"),
+ "item": item, # pass-through for incomingCalls/outgoingCalls
+ }
+
+ @staticmethod
+ def _fmt_call_hierarchy_call(call: Any, direction: str) -> dict:
+ item_key = "from" if direction == "incoming" else "to"
+ caller = call.get(item_key, {})
+ uri = caller.get("uri", "")
+ start = caller.get("range", {}).get("start", {})
+ ranges = [r.get("start", {}) for r in call.get(f"{item_key}Ranges", [])]
+ return {
+ "name": caller.get("name", ""),
+ "kind": caller.get("kind"),
+ "file": uri.replace("file://", "") if uri.startswith("file://") else uri,
+ "line": start.get("line"),
+ "call_sites": [{"line": r.get("line"), "column": r.get("character")} for r in ranges],
+ "item": caller, # pass-through for chaining
+ }
+
+ # ── tool handler ──────────────────────────────────────────────────
+
+ async def _handle(
+ self,
+ operation: str,
+ file_path: str | None = None,
+ line: int | None = None,
+ column: int | None = None,
+ query: str | None = None,
+ language: str | None = None,
+ item: dict | None = None,
+ ) -> str:
+ # Resolve language (incomingCalls/outgoingCalls carry language in item["uri"])
+ lang = language
+ if not lang and file_path:
+ lang = self._detect_language(file_path)
+ if not lang and operation in ("incomingCalls", "outgoingCalls") and item:
+ uri = item.get("uri", "")
+ lang = self._detect_language(uri)
+ if not lang:
+ supported = ", ".join(sorted(set(_EXT_TO_LANG.values())))
+ return f"Cannot detect language. Set 'language' parameter. Supported: {supported}"
+
+ # 10 MB file size guard (matches CC LSP limit)
+ if file_path:
+ err = self._check_file(file_path)
+ if err:
+ return err
+
+ # Python advanced ops → pyright; other languages → multilspy server.send.*
+ use_pyright = lang == "python" and operation in self._ADVANCED_OPS
+
+ pyright: _PyrightSession | None = None
+ session: _LSPSession | None = None
+
+ if use_pyright:
+ try:
+ pyright = await self._get_pyright()
+ except Exception as e:
+ return f"Failed to start pyright language server: {e}"
+ else:
+ try:
+ session = await self._get_session(lang)
+ except Exception as e:
+ return f"Failed to start {lang} language server: {e}"
+
+ rel = self._to_relative(file_path) if file_path else ""
+
+ try:
+ if operation == "goToDefinition":
+ if not file_path or line is None or column is None:
+ return "goToDefinition requires: file_path, line, column"
+ results = await session.request_definition(rel, line, column)
+ results = self._filter_gitignored_batched(results)
+ if not results:
+ return "No definition found."
+ return json.dumps([self._fmt_location(r) for r in results], indent=2)
+
+ elif operation == "findReferences":
+ if not file_path or line is None or column is None:
+ return "findReferences requires: file_path, line, column"
+ results = await session.request_references(rel, line, column)
+ results = self._filter_gitignored_batched(results)
+ if not results:
+ return "No references found."
+ return json.dumps([self._fmt_location(r) for r in results], indent=2)
+
+ elif operation == "hover":
+ if not file_path or line is None or column is None:
+ return "hover requires: file_path, line, column"
+ result = await session.request_hover(rel, line, column)
+ if not result:
+ return "No hover info."
+ return self._fmt_hover(result)
+
+ elif operation == "documentSymbol":
+ if not file_path:
+ return "documentSymbol requires: file_path"
+ symbols = await session.request_document_symbols(rel)
+ if not symbols:
+ return "No symbols found."
+ return json.dumps([self._fmt_symbol(s) for s in symbols], indent=2)
+
+ elif operation == "workspaceSymbol":
+ if not query:
+ return "workspaceSymbol requires: query"
+ symbols = await session.request_workspace_symbol(query)
+ if not symbols:
+ return f"No symbols matching '{query}'."
+ return json.dumps([self._fmt_symbol(s) for s in symbols], indent=2)
+
+ elif operation == "goToImplementation":
+ if not file_path or line is None or column is None:
+ return "goToImplementation requires: file_path, line, column"
+ src = pyright if use_pyright else session
+ results = await src.request_implementation(rel, line, column)
+ results = self._filter_gitignored_batched(results)
+ if not results:
+ return "No implementation found."
+ return json.dumps([self._fmt_location(r) for r in results], indent=2)
+
+ elif operation == "prepareCallHierarchy":
+ if not file_path or line is None or column is None:
+ return "prepareCallHierarchy requires: file_path, line, column"
+ src = pyright if use_pyright else session
+ items = await src.request_prepare_call_hierarchy(rel, line, column)
+ if not items:
+ return "No call hierarchy items found."
+ return json.dumps([self._fmt_call_hierarchy_item(i) for i in items], indent=2)
+
+ elif operation == "incomingCalls":
+ if not item:
+ return "incomingCalls requires: item (CallHierarchyItem from prepareCallHierarchy)"
+ src = pyright if use_pyright else session
+ calls = await src.request_incoming_calls(item)
+ if not calls:
+ return "No incoming calls found."
+ return json.dumps([self._fmt_call_hierarchy_call(c, "incoming") for c in calls], indent=2)
+
+ elif operation == "outgoingCalls":
+ if not item:
+ return "outgoingCalls requires: item (CallHierarchyItem from prepareCallHierarchy)"
+ src = pyright if use_pyright else session
+ calls = await src.request_outgoing_calls(item)
+ if not calls:
+ return "No outgoing calls found."
+ return json.dumps([self._fmt_call_hierarchy_call(c, "outgoing") for c in calls], indent=2)
+
+ else:
+ return (
+ f"Unknown operation '{operation}'. "
+ "Valid: goToDefinition, findReferences, hover, documentSymbol, workspaceSymbol, "
+ "goToImplementation, prepareCallHierarchy, incomingCalls, outgoingCalls"
+ )
+
+ except Exception as e:
+ logger.exception("[LSPService] operation=%s failed", operation)
+ return f"LSP error: {e}"
+
diff --git a/core/tools/search/service.py b/core/tools/search/service.py
index 4329de6e4..0aacfab01 100644
--- a/core/tools/search/service.py
+++ b/core/tools/search/service.py
@@ -17,6 +17,11 @@
DEFAULT_EXCLUDES: list[str] = [
"node_modules",
".git",
+ ".svn",
+ ".hg",
+ ".bzr",
+ ".jj",
+ ".sl",
"__pycache__",
".venv",
"venv",
@@ -52,7 +57,12 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "Grep",
- "description": "Search file contents using regex patterns.",
+ "description": (
+ "Regex search across files (ripgrep-based). "
+ "Default output_mode: files_with_matches (sorted by mtime). Default head_limit: 250 entries. "
+ "Auto-excludes .git/.svn/.hg dirs. Max column width 500 chars (suppresses minified/base64). "
+ "Use output_mode='content' with after_context/before_context/context for context lines."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -105,12 +115,19 @@ def _register(self, registry: ToolRegistry) -> None:
"type": "boolean",
"description": "Allow pattern to span multiple lines",
},
+ "line_numbers": {
+ "type": "boolean",
+ "description": "Show line numbers (default true). Only applies with output_mode='content'.",
+ },
},
"required": ["pattern"],
},
},
handler=self._grep,
source="SearchService",
+ search_hint="search file contents regex pattern matching ripgrep",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
@@ -120,7 +137,11 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "Glob",
- "description": "Find files by glob pattern. Returns paths sorted by modification time.",
+ "description": (
+ "Fast file pattern matching (ripgrep-based). Returns paths sorted by modification time. "
+ "Includes hidden files, ignores .gitignore. Default limit 100 results. "
+ "Use '**/*.py' for recursive search. Path must be absolute."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -138,6 +159,9 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._glob,
source="SearchService",
+ search_hint="find files by name glob pattern matching",
+ is_read_only=True,
+ is_concurrency_safe=True,
)
)
@@ -183,9 +207,10 @@ def _grep(
before_context: int | None = None,
context: int | None = None,
output_mode: str = "files_with_matches",
- head_limit: int | None = None,
+ head_limit: int | None = 250,
offset: int | None = None,
multiline: bool = False,
+ line_numbers: bool = True,
) -> str:
ok, error, resolved = self._validate_path(path)
if not ok:
@@ -209,6 +234,7 @@ def _grep(
head_limit=head_limit,
offset=offset,
multiline=multiline,
+ line_numbers=line_numbers,
)
except Exception:
pass # fallback to Python
@@ -238,8 +264,9 @@ def _ripgrep_search(
head_limit: int | None,
offset: int | None,
multiline: bool,
+ line_numbers: bool = True,
) -> str:
- cmd: list[str] = ["rg", pattern, str(path)]
+ cmd: list[str] = ["rg", pattern, str(path), "--max-columns", "500"]
for excl in DEFAULT_EXCLUDES:
cmd.extend(["--glob", f"!{excl}"])
@@ -258,7 +285,8 @@ def _ripgrep_search(
elif output_mode == "count":
cmd.append("--count")
elif output_mode == "content":
- cmd.extend(["--line-number", "--no-heading"])
+ ln_flag = "--line-number" if line_numbers else "--no-line-number"
+ cmd.extend([ln_flag, "--no-heading"])
if context is not None:
cmd.extend(["-C", str(context)])
else:
diff --git a/core/tools/skills/service.py b/core/tools/skills/service.py
index e65215a20..c262ed27e 100644
--- a/core/tools/skills/service.py
+++ b/core/tools/skills/service.py
@@ -65,6 +65,8 @@ def _register(self, registry: ToolRegistry) -> None:
schema=self._get_schema,
handler=self._load_skill,
source="SkillsService",
+ is_concurrency_safe=True,
+ is_read_only=True,
)
)
@@ -75,9 +77,10 @@ def _get_schema(self) -> dict:
return {
"name": "load_skill",
"description": (
- f"Load a specialized skill to access domain-specific knowledge and workflows.\n\n"
- f"Available skills:\n{skills_list}\n\n"
- f"Returns the skill's instructions and context."
+ f"Load a skill for domain-specific guidance. "
+ f"Use when you need specialized workflows (TDD, debugging, git). "
+ f"Skills are loaded on-demand to save context.\n\n"
+ f"Available skills:\n{skills_list}"
),
"parameters": {
"type": "object",
diff --git a/core/tools/task/service.py b/core/tools/task/service.py
index a5dacacf1..dd659016d 100644
--- a/core/tools/task/service.py
+++ b/core/tools/task/service.py
@@ -22,7 +22,11 @@
TASK_CREATE_SCHEMA = {
"name": "TaskCreate",
- "description": ("Create a new task to track work progress. Tasks are created with status 'pending'."),
+ "description": (
+ "Create a task to track multi-step work. "
+ "Use for complex tasks with 3+ steps or when managing multiple parallel workstreams. "
+ "Status starts as 'pending'."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -157,12 +161,14 @@ def _get_thread_id(self) -> str:
return tid or "default"
def _register(self, registry: ToolRegistry) -> None:
+ _READ_ONLY = {"TaskGet", "TaskList"}
for name, schema, handler in [
("TaskCreate", TASK_CREATE_SCHEMA, self._create),
("TaskGet", TASK_GET_SCHEMA, self._get),
("TaskList", TASK_LIST_SCHEMA, self._list),
("TaskUpdate", TASK_UPDATE_SCHEMA, self._update),
]:
+ ro = name in _READ_ONLY
registry.register(
ToolEntry(
name=name,
@@ -170,6 +176,8 @@ def _register(self, registry: ToolRegistry) -> None:
schema=schema,
handler=handler,
source="TaskService",
+ is_concurrency_safe=ro,
+ is_read_only=ro,
)
)
diff --git a/core/tools/tool_search/service.py b/core/tools/tool_search/service.py
index 9b5ceba77..a770b4ca4 100644
--- a/core/tools/tool_search/service.py
+++ b/core/tools/tool_search/service.py
@@ -15,13 +15,18 @@
TOOL_SEARCH_SCHEMA = {
"name": "tool_search",
- "description": ("Search for available tools. Use this to discover tools that might help with your task."),
+ "description": (
+ "Search for available tools by name or keyword. "
+ "Use 'select:ToolA,ToolB' for exact lookup (returns full schema). "
+ "Use keywords for fuzzy search (up to 5 results). "
+ "Deferred tools are only usable after discovery via this tool."
+ ),
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
- "description": "Search query - tool name or description of what you want to do",
+ "description": "Search query. Use 'select:ToolA,ToolB' for exact name lookup, or keywords for fuzzy search.",
},
},
"required": ["query"],
@@ -41,6 +46,8 @@ def __init__(self, registry: ToolRegistry):
schema=TOOL_SEARCH_SCHEMA,
handler=self._search,
source="ToolSearchService",
+ is_concurrency_safe=True,
+ is_read_only=True,
)
)
logger.info("ToolSearchService initialized")
diff --git a/core/tools/web/service.py b/core/tools/web/service.py
index 077db9b70..41bccf5df 100644
--- a/core/tools/web/service.py
+++ b/core/tools/web/service.py
@@ -62,7 +62,10 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "WebSearch",
- "description": "Search the web for current information. Returns titles, URLs, and snippets.",
+ "description": (
+ "Search the web. Returns titles, URLs, and text snippets. "
+ "Use for current events, documentation lookups, or fact-checking. Max 10 results per query."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -90,6 +93,8 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._web_search,
source="WebService",
+ is_concurrency_safe=True,
+ is_read_only=True,
)
)
@@ -99,7 +104,11 @@ def _register(self, registry: ToolRegistry) -> None:
mode=ToolMode.INLINE,
schema={
"name": "WebFetch",
- "description": "Fetch a URL and extract specific information using AI. Returns processed content, not raw HTML.",
+ "description": (
+ "Fetch a URL and extract specific information via AI. Returns processed text, not raw HTML. "
+ "Provide a focused prompt describing what to extract. "
+ "Useful for reading documentation pages, API references, or articles."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -117,6 +126,8 @@ def _register(self, registry: ToolRegistry) -> None:
},
handler=self._web_fetch,
source="WebService",
+ is_concurrency_safe=True,
+ is_read_only=True,
)
)
diff --git a/core/tools/wechat/service.py b/core/tools/wechat/service.py
index 5df2aae14..69a6670e2 100644
--- a/core/tools/wechat/service.py
+++ b/core/tools/wechat/service.py
@@ -33,19 +33,27 @@ def _register(self, registry: ToolRegistry) -> None:
self._register_wechat_send(registry)
self._register_wechat_contacts(registry)
- def _register_wechat_send(self, registry: ToolRegistry) -> None:
- get_conn = self._get_conn
-
- async def handle(user_id: str, text: str) -> str:
- conn = get_conn()
- if not conn or not conn.connected:
- return "Error: WeChat is not connected. Ask the owner to connect via the Connections page."
- try:
- await conn.send_message(user_id, text)
- return f"Message sent to {user_id.split('@')[0]}"
- except RuntimeError as e:
- return f"Error: {e}"
+ async def _handle_send(self, user_id: str, text: str) -> str:
+ conn = self._get_conn()
+ if not conn or not conn.connected:
+ return "Error: WeChat is not connected. Ask the owner to connect via the Connections page."
+ try:
+ await conn.send_message(user_id, text)
+ return f"Message sent to {user_id.split('@')[0]}"
+ except RuntimeError as e:
+ return f"Error: {e}"
+
+ def _handle_contacts(self) -> str:
+ conn = self._get_conn()
+ if not conn or not conn.connected:
+ return "WeChat is not connected."
+ contacts = conn.list_contacts()
+ if not contacts:
+ return "No WeChat contacts yet. Users need to message the bot first."
+ lines = [f"- {c['display_name']} [user_id: {c['user_id']}]" for c in contacts]
+ return "\n".join(lines)
+ def _register_wechat_send(self, registry: ToolRegistry) -> None:
registry.register(
ToolEntry(
name="wechat_send",
@@ -73,24 +81,13 @@ async def handle(user_id: str, text: str) -> str:
"required": ["user_id", "text"],
},
},
- handler=handle,
+ handler=self._handle_send,
source="wechat",
+ search_hint="send wechat message to contact",
)
)
def _register_wechat_contacts(self, registry: ToolRegistry) -> None:
- get_conn = self._get_conn
-
- def handle() -> str:
- conn = get_conn()
- if not conn or not conn.connected:
- return "WeChat is not connected."
- contacts = conn.list_contacts()
- if not contacts:
- return "No WeChat contacts yet. Users need to message the bot first."
- lines = [f"- {c['display_name']} [user_id: {c['user_id']}]" for c in contacts]
- return "\n".join(lines)
-
registry.register(
ToolEntry(
name="wechat_contacts",
@@ -103,7 +100,9 @@ def handle() -> str:
"properties": {},
},
},
- handler=handle,
+ handler=self._handle_contacts,
source="wechat",
+ is_concurrency_safe=True,
+ is_read_only=True,
)
)
diff --git a/pyproject.toml b/pyproject.toml
index 6f55638a5..40edb723b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,8 @@ dependencies = [
"croniter>=6.0.0",
"uvicorn>=0.30.0",
"sse-starlette>=1.6.0",
+ "multilspy>=0.0.15",
+ "pyright>=1.1.0",
]
[project.optional-dependencies]
@@ -87,6 +89,7 @@ packages = [
"core.tools.filesystem",
"core.tools.filesystem.read",
"core.tools.filesystem.read.readers",
+ "core.tools.lsp",
"core.tools.search",
"core.tools.skills",
"core.tools.task",
diff --git a/sandbox/thread_context.py b/sandbox/thread_context.py
index d52ba7ef1..d98e9895c 100644
--- a/sandbox/thread_context.py
+++ b/sandbox/thread_context.py
@@ -3,10 +3,14 @@
from __future__ import annotations
from contextvars import ContextVar
+from typing import Any
_current_thread_id: ContextVar[str] = ContextVar("sandbox_thread_id", default="")
# @@@run-context - groups file ops per execution unit: checkpoint_id in TUI, run_id in web mode.
_current_run_id: ContextVar[str] = ContextVar("sandbox_run_id", default="")
+# Parent conversation messages — set by QueryLoop before tool execution; read by AgentService
+# for forkContext=True sub-agent spawning.
+_current_messages: ContextVar[list[Any]] = ContextVar("current_messages", default=[])
def set_current_thread_id(thread_id: str) -> None:
@@ -25,3 +29,11 @@ def set_current_run_id(run_id: str) -> None:
def get_current_run_id() -> str | None:
value = _current_run_id.get()
return value if value else None
+
+
+def set_current_messages(messages: list[Any]) -> None:
+ _current_messages.set(list(messages))
+
+
+def get_current_messages() -> list[Any]:
+ return _current_messages.get()
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/integration/test_leon_agent.py b/tests/integration/test_leon_agent.py
new file mode 100644
index 000000000..bbb70c5a7
--- /dev/null
+++ b/tests/integration/test_leon_agent.py
@@ -0,0 +1,148 @@
+"""Integration tests for LeonAgent with QueryLoop.
+
+Uses mock model to verify the full astream pipeline without real API calls.
+"""
+
+import os
+from pathlib import Path
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from langchain_core.messages import AIMessage, SystemMessage
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+def _mock_model(text="Integration test response"):
+ """Create a mock LangChain model that returns a plain AIMessage."""
+ ai_msg = AIMessage(content=text)
+ model = MagicMock()
+ model.bind_tools.return_value = model
+ model.ainvoke = AsyncMock(return_value=ai_msg)
+ # configurable_fields support
+ model.configurable_fields.return_value = model
+ model.with_config.return_value = model
+ return model
+
+
+def _patch_env_api_key():
+ """Ensure ANTHROPIC_API_KEY is set for LeonAgent init (uses a fake value)."""
+ return patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-test-integration"})
+
+
+# ---------------------------------------------------------------------------
+# Integration Tests
+# ---------------------------------------------------------------------------
+
+@pytest.mark.asyncio
+@_patch_env_api_key()
+async def test_leon_agent_simple_run(tmp_path):
+ """LeonAgent with mock model: astream completes and yields chunks."""
+ from core.runtime.agent import LeonAgent
+
+ mock_model = _mock_model("Hello from integration test")
+
+ with patch("core.runtime.agent.LeonAgent._create_model", return_value=mock_model), \
+ patch("core.runtime.agent.LeonAgent._init_async_components", return_value=(None, [])), \
+ patch("core.runtime.agent.LeonAgent._init_checkpointer", new_callable=AsyncMock, return_value=None):
+
+ agent = LeonAgent(workspace_root=str(tmp_path), api_key="sk-test-integration")
+ await agent.ainit()
+
+ results = []
+ async for chunk in agent.agent.astream(
+ {"messages": [{"role": "user", "content": "hello"}]},
+ config={"configurable": {"thread_id": "test-integration-1"}},
+ stream_mode="updates",
+ ):
+ results.append(chunk)
+
+ assert len(results) > 0
+ # At least one agent chunk
+ agent_chunks = [c for c in results if "agent" in c]
+ assert len(agent_chunks) >= 1
+ # Agent message content matches mock
+ first_ai_msgs = agent_chunks[0]["agent"]["messages"]
+ assert any("integration test" in str(m.content) for m in first_ai_msgs)
+
+ agent.close()
+
+
+@pytest.mark.asyncio
+@_patch_env_api_key()
+async def test_leon_agent_astream_interface_compatible(tmp_path):
+ """astream yields dicts with 'agent' key — compatible with LangGraph stream_mode=updates."""
+ from core.runtime.agent import LeonAgent
+
+ mock_model = _mock_model("Compatible response")
+
+ with patch("core.runtime.agent.LeonAgent._create_model", return_value=mock_model), \
+ patch("core.runtime.agent.LeonAgent._init_async_components", return_value=(None, [])), \
+ patch("core.runtime.agent.LeonAgent._init_checkpointer", new_callable=AsyncMock, return_value=None):
+
+ agent = LeonAgent(workspace_root=str(tmp_path), api_key="sk-test-integration")
+ await agent.ainit()
+
+ chunks = []
+ async for chunk in agent.agent.astream(
+ {"messages": [{"role": "user", "content": "test"}]},
+ config={"configurable": {"thread_id": "test-integration-2"}},
+ stream_mode="updates",
+ ):
+ chunks.append(chunk)
+
+ # All chunks are dicts
+ assert all(isinstance(c, dict) for c in chunks)
+ # All keys are one of "agent" or "tools"
+ for c in chunks:
+ assert set(c.keys()).issubset({"agent", "tools"})
+
+ agent.close()
+
+
+@pytest.mark.asyncio
+@_patch_env_api_key()
+async def test_leon_agent_multiple_thread_ids(tmp_path):
+ """Different thread_ids produce independent sessions (no cross-contamination)."""
+ from core.runtime.agent import LeonAgent
+
+ responses = iter(["Response for thread-A", "Response for thread-B"])
+ mock_model = MagicMock()
+ mock_model.bind_tools.return_value = mock_model
+ mock_model.with_config.return_value = mock_model
+ mock_model.configurable_fields.return_value = mock_model
+ mock_model.ainvoke = AsyncMock(side_effect=[
+ AIMessage(content="Response for thread-A"),
+ AIMessage(content="Response for thread-B"),
+ ])
+
+ with patch("core.runtime.agent.LeonAgent._create_model", return_value=mock_model), \
+ patch("core.runtime.agent.LeonAgent._init_async_components", return_value=(None, [])), \
+ patch("core.runtime.agent.LeonAgent._init_checkpointer", new_callable=AsyncMock, return_value=None):
+
+ agent = LeonAgent(workspace_root=str(tmp_path), api_key="sk-test-integration")
+ await agent.ainit()
+
+ chunks_a = []
+ async for chunk in agent.agent.astream(
+ {"messages": [{"role": "user", "content": "hi A"}]},
+ config={"configurable": {"thread_id": "thread-A"}},
+ stream_mode="updates",
+ ):
+ chunks_a.append(chunk)
+
+ chunks_b = []
+ async for chunk in agent.agent.astream(
+ {"messages": [{"role": "user", "content": "hi B"}]},
+ config={"configurable": {"thread_id": "thread-B"}},
+ stream_mode="updates",
+ ):
+ chunks_b.append(chunk)
+
+ # Both sessions produced chunks
+ assert len(chunks_a) > 0
+ assert len(chunks_b) > 0
+
+ agent.close()
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/unit/test_cleanup.py b/tests/unit/test_cleanup.py
new file mode 100644
index 000000000..1930a8079
--- /dev/null
+++ b/tests/unit/test_cleanup.py
@@ -0,0 +1,74 @@
+"""Unit tests for core.runtime.cleanup CleanupRegistry."""
+
+import asyncio
+
+import pytest
+
+from core.runtime.cleanup import CleanupRegistry
+
+
+@pytest.mark.asyncio
+async def test_runs_in_priority_order():
+ order = []
+ reg = CleanupRegistry()
+ reg.register(lambda: order.append(3), priority=3)
+ reg.register(lambda: order.append(1), priority=1)
+ reg.register(lambda: order.append(2), priority=2)
+ await reg.run_cleanup()
+ assert order == [1, 2, 3]
+
+
+@pytest.mark.asyncio
+async def test_same_priority_runs_all():
+ order = []
+ reg = CleanupRegistry()
+ reg.register(lambda: order.append("a"), priority=5)
+ reg.register(lambda: order.append("b"), priority=5)
+ await reg.run_cleanup()
+ assert set(order) == {"a", "b"}
+
+
+@pytest.mark.asyncio
+async def test_failure_does_not_stop_later_functions():
+ order = []
+ reg = CleanupRegistry()
+
+ def failing():
+ raise RuntimeError("boom")
+
+ reg.register(failing, priority=1)
+ reg.register(lambda: order.append("ok"), priority=2)
+ # Should not raise; failure is logged and execution continues
+ await reg.run_cleanup()
+ assert order == ["ok"]
+
+
+@pytest.mark.asyncio
+async def test_async_cleanup_function():
+ results = []
+
+ async def async_fn():
+ results.append("async")
+
+ reg = CleanupRegistry()
+ reg.register(async_fn, priority=1)
+ await reg.run_cleanup()
+ assert results == ["async"]
+
+
+@pytest.mark.asyncio
+async def test_empty_registry_runs_cleanly():
+ reg = CleanupRegistry()
+ # Should complete without error
+ await reg.run_cleanup()
+
+
+@pytest.mark.asyncio
+async def test_register_multiple_same_priority():
+ order = []
+ reg = CleanupRegistry()
+ for i in range(5):
+ n = i # capture
+ reg.register(lambda n=n: order.append(n), priority=1)
+ await reg.run_cleanup()
+ assert sorted(order) == [0, 1, 2, 3, 4]
diff --git a/tests/unit/test_fork.py b/tests/unit/test_fork.py
new file mode 100644
index 000000000..03a78751d
--- /dev/null
+++ b/tests/unit/test_fork.py
@@ -0,0 +1,79 @@
+"""Unit tests for core.runtime.fork context fork."""
+
+from pathlib import Path
+
+import pytest
+
+from core.runtime.fork import fork_context
+from core.runtime.state import BootstrapConfig
+
+
+@pytest.fixture
+def parent():
+ return BootstrapConfig(
+ workspace_root=Path("/workspace"),
+ model_name="claude-opus-4-5",
+ api_key="sk-parent",
+ block_dangerous_commands=True,
+ block_network_commands=True,
+ enable_audit_log=False,
+ enable_web_tools=True,
+ allowed_file_extensions=[".py"],
+ max_turns=20,
+ model_provider="anthropic",
+ base_url="https://api.anthropic.com",
+ context_limit=200000,
+ )
+
+
+def test_fork_inherits_workspace(parent):
+ child = fork_context(parent)
+ assert child.workspace_root == parent.workspace_root
+
+
+def test_fork_inherits_model(parent):
+ child = fork_context(parent)
+ assert child.model_name == parent.model_name
+ assert child.api_key == parent.api_key
+
+
+def test_fork_inherits_security_flags(parent):
+ child = fork_context(parent)
+ assert child.block_dangerous_commands == parent.block_dangerous_commands
+ assert child.block_network_commands == parent.block_network_commands
+ assert child.enable_audit_log == parent.enable_audit_log
+ assert child.enable_web_tools == parent.enable_web_tools
+
+
+def test_fork_inherits_file_config(parent):
+ child = fork_context(parent)
+ assert child.allowed_file_extensions == parent.allowed_file_extensions
+ assert child.max_turns == parent.max_turns
+
+
+def test_fork_inherits_model_settings(parent):
+ child = fork_context(parent)
+ assert child.model_provider == parent.model_provider
+ assert child.base_url == parent.base_url
+ assert child.context_limit == parent.context_limit
+
+
+def test_fork_generates_new_session_id(parent):
+ child = fork_context(parent)
+ assert child.session_id != parent.session_id
+
+
+def test_fork_sets_parent_session_id(parent):
+ child = fork_context(parent)
+ assert child.parent_session_id == parent.session_id
+
+
+def test_fork_is_independent_object(parent):
+ child = fork_context(parent)
+ assert child is not parent
+
+
+def test_multiple_forks_have_unique_session_ids(parent):
+ children = [fork_context(parent) for _ in range(10)]
+ session_ids = {c.session_id for c in children}
+ assert len(session_ids) == 10
diff --git a/tests/unit/test_loop.py b/tests/unit/test_loop.py
new file mode 100644
index 000000000..59b425980
--- /dev/null
+++ b/tests/unit/test_loop.py
@@ -0,0 +1,216 @@
+"""Unit tests for core.runtime.loop QueryLoop."""
+
+from pathlib import Path
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
+
+from core.runtime.loop import QueryLoop
+from core.runtime.registry import ToolEntry, ToolMode, ToolRegistry
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+def make_registry(*entries):
+ reg = ToolRegistry()
+ for e in entries:
+ reg.register(e)
+ return reg
+
+
+def make_loop(model, registry=None, middleware=None, max_turns=10):
+ return QueryLoop(
+ model=model,
+ system_prompt=SystemMessage(content="You are a test assistant."),
+ middleware=middleware or [],
+ checkpointer=None,
+ registry=registry or make_registry(),
+ max_turns=max_turns,
+ )
+
+
+def mock_model_no_tools(text="Hello!"):
+ """Model that returns a plain AIMessage (no tool calls)."""
+ ai_msg = AIMessage(content=text)
+ model = MagicMock()
+ model.bind_tools.return_value = model
+ model.ainvoke = AsyncMock(return_value=ai_msg)
+ return model
+
+
+def mock_model_with_tool_call(tool_name="echo", args=None, call_id="tc-1", then_text="Done"):
+ """Model that first responds with a tool call, then responds with plain text."""
+ args = args or {"message": "hi"}
+ tool_call_msg = AIMessage(
+ content="",
+ tool_calls=[{"name": tool_name, "args": args, "id": call_id}],
+ )
+ final_msg = AIMessage(content=then_text)
+ model = MagicMock()
+ model.bind_tools.return_value = model
+ model.ainvoke = AsyncMock(side_effect=[tool_call_msg, final_msg])
+ return model
+
+
+# ---------------------------------------------------------------------------
+# Tests: no tool calls → single agent chunk
+# ---------------------------------------------------------------------------
+
+@pytest.mark.asyncio
+async def test_no_tool_calls_yields_one_agent_chunk():
+ model = mock_model_no_tools("Hello world")
+ loop = make_loop(model)
+
+ chunks = []
+ async for chunk in loop.astream({"messages": [{"role": "user", "content": "hi"}]}):
+ chunks.append(chunk)
+
+ assert len(chunks) == 1
+ assert "agent" in chunks[0]
+ msgs = chunks[0]["agent"]["messages"]
+ assert len(msgs) == 1
+ assert msgs[0].content == "Hello world"
+
+
+@pytest.mark.asyncio
+async def test_no_tool_calls_model_called_once():
+ model = mock_model_no_tools()
+ loop = make_loop(model)
+
+ async for _ in loop.astream({"messages": [{"role": "user", "content": "hi"}]}):
+ pass
+
+ assert model.ainvoke.call_count == 1
+
+
+# ---------------------------------------------------------------------------
+# Tests: with tool calls → agent chunk + tools chunk
+# ---------------------------------------------------------------------------
+
+@pytest.mark.asyncio
+async def test_tool_call_yields_agent_then_tools():
+ model = mock_model_with_tool_call()
+
+ # Register a simple echo tool
+ def echo_handler(message: str) -> str:
+ return f"echo: {message}"
+
+ entry = ToolEntry(
+ name="echo",
+ mode=ToolMode.INLINE,
+ schema={"name": "echo", "description": "echo", "parameters": {"type": "object", "properties": {}}},
+ handler=echo_handler,
+ source="test",
+ is_concurrency_safe=True,
+ )
+ registry = make_registry(entry)
+ loop = make_loop(model, registry=registry)
+
+ chunks = []
+ async for chunk in loop.astream({"messages": [{"role": "user", "content": "call echo"}]}):
+ chunks.append(chunk)
+
+ # First chunk: agent (with tool_calls)
+ # Second chunk: tools (ToolMessage results)
+ # Third chunk: agent (final text response)
+ agent_chunks = [c for c in chunks if "agent" in c]
+ tools_chunks = [c for c in chunks if "tools" in c]
+
+ assert len(agent_chunks) >= 1
+ assert len(tools_chunks) >= 1
+
+ # Tool result should be a ToolMessage
+ tool_msgs = tools_chunks[0]["tools"]["messages"]
+ assert len(tool_msgs) == 1
+ assert isinstance(tool_msgs[0], ToolMessage)
+
+
+@pytest.mark.asyncio
+async def test_tool_call_result_content():
+ model = mock_model_with_tool_call(tool_name="echo", args={"message": "test-val"})
+
+ def echo_handler(message: str) -> str:
+ return f"echo: {message}"
+
+ entry = ToolEntry(
+ name="echo",
+ mode=ToolMode.INLINE,
+ schema={"name": "echo", "description": "d", "parameters": {}},
+ handler=echo_handler,
+ source="test",
+ is_concurrency_safe=False,
+ )
+ loop = make_loop(model, registry=make_registry(entry))
+
+ tool_results = []
+ async for chunk in loop.astream({"messages": [{"role": "user", "content": "x"}]}):
+ if "tools" in chunk:
+ tool_results.extend(chunk["tools"]["messages"])
+
+ assert len(tool_results) == 1
+ assert "echo: test-val" in tool_results[0].content
+
+
+# ---------------------------------------------------------------------------
+# Tests: max_turns guard
+# ---------------------------------------------------------------------------
+
+@pytest.mark.asyncio
+async def test_max_turns_stops_loop():
+ """Agent that always calls a tool should stop at max_turns."""
+
+ def noop_handler() -> str:
+ return "ok"
+
+ entry = ToolEntry(
+ name="noop",
+ mode=ToolMode.INLINE,
+ schema={"name": "noop", "description": "d", "parameters": {}},
+ handler=noop_handler,
+ source="test",
+ is_concurrency_safe=True,
+ )
+
+ # Build a model that always returns a tool call
+ tool_call_msg = AIMessage(
+ content="",
+ tool_calls=[{"name": "noop", "args": {}, "id": "tc-1"}],
+ )
+ model = MagicMock()
+ model.bind_tools.return_value = model
+ model.ainvoke = AsyncMock(return_value=tool_call_msg)
+
+ loop = make_loop(model, registry=make_registry(entry), max_turns=3)
+
+ chunks = []
+ async for chunk in loop.astream({"messages": [{"role": "user", "content": "go"}]}):
+ chunks.append(chunk)
+
+ # Should stop after 3 turns (3 agent + 3 tool chunks = 6 total)
+ assert len(chunks) <= 6
+ assert model.ainvoke.call_count == 3
+
+
+# ---------------------------------------------------------------------------
+# Tests: input parsing
+# ---------------------------------------------------------------------------
+
+def test_parse_input_dict_messages():
+ msgs = QueryLoop._parse_input({"messages": [{"role": "user", "content": "hello"}]})
+ assert len(msgs) == 1
+ assert isinstance(msgs[0], HumanMessage)
+ assert msgs[0].content == "hello"
+
+
+def test_parse_input_langchain_messages():
+ human = HumanMessage(content="hi")
+ msgs = QueryLoop._parse_input({"messages": [human]})
+ assert msgs[0] is human
+
+
+def test_parse_input_empty():
+ assert QueryLoop._parse_input({}) == []
+ assert QueryLoop._parse_input({"messages": []}) == []
diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py
new file mode 100644
index 000000000..efc5dc356
--- /dev/null
+++ b/tests/unit/test_state.py
@@ -0,0 +1,102 @@
+"""Unit tests for core.runtime.state three-layer state models."""
+
+from pathlib import Path
+
+import pytest
+
+from core.runtime.state import AppState, BootstrapConfig, ToolUseContext
+
+
+class TestBootstrapConfig:
+ def test_minimal_creation(self):
+ bc = BootstrapConfig(workspace_root=Path("/tmp"), model_name="claude-3-5-sonnet-20241022")
+ assert bc.workspace_root == Path("/tmp")
+ assert bc.model_name == "claude-3-5-sonnet-20241022"
+ assert bc.api_key is None
+
+ def test_security_fail_closed_defaults(self):
+ bc = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ assert bc.block_dangerous_commands is True
+ assert bc.block_network_commands is False
+ assert bc.enable_audit_log is True
+
+ def test_all_fields(self):
+ bc = BootstrapConfig(
+ workspace_root=Path("/workspace"),
+ model_name="claude-opus-4-5",
+ api_key="sk-test",
+ block_dangerous_commands=False,
+ enable_web_tools=True,
+ allowed_file_extensions=[".py", ".ts"],
+ max_turns=50,
+ )
+ assert bc.api_key == "sk-test"
+ assert bc.enable_web_tools is True
+ assert bc.allowed_file_extensions == [".py", ".ts"]
+ assert bc.max_turns == 50
+
+ def test_session_id_generated(self):
+ bc1 = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ bc2 = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ assert bc1.session_id != bc2.session_id
+ assert len(bc1.session_id) == 32 # uuid4().hex
+
+
+class TestAppState:
+ def test_default_values(self):
+ s = AppState()
+ assert s.messages == []
+ assert s.turn_count == 0
+ assert s.total_cost == 0.0
+ assert s.compact_boundary_index == 0
+
+ def test_get_state_returns_self(self):
+ s = AppState()
+ assert s.get_state() is s
+
+ def test_set_state_applies_updater(self):
+ s = AppState()
+ s.set_state(lambda prev: AppState(turn_count=prev.turn_count + 1))
+ assert s.turn_count == 1
+
+ def test_set_state_multiple_fields(self):
+ s = AppState()
+ s.set_state(lambda prev: AppState(turn_count=5, total_cost=1.23))
+ assert s.turn_count == 5
+ assert s.total_cost == 1.23
+
+ def test_tool_overrides(self):
+ s = AppState(tool_overrides={"Bash": False})
+ assert s.tool_overrides["Bash"] is False
+
+
+class TestToolUseContext:
+ def test_creation(self):
+ bc = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ app_state = AppState()
+ ctx = ToolUseContext(
+ bootstrap=bc,
+ get_app_state=lambda: app_state,
+ set_app_state=lambda _: None,
+ )
+ assert ctx.bootstrap is bc
+ assert ctx.get_app_state() is app_state
+
+ def test_turn_id_generated(self):
+ bc = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ ctx1 = ToolUseContext(bootstrap=bc, get_app_state=lambda: None, set_app_state=lambda _: None)
+ ctx2 = ToolUseContext(bootstrap=bc, get_app_state=lambda: None, set_app_state=lambda _: None)
+ assert ctx1.turn_id != ctx2.turn_id
+ assert len(ctx1.turn_id) == 8
+
+ def test_subagent_noop_set_state(self):
+ """Sub-agents should use a NO-OP set_app_state to prevent write-through."""
+ bc = BootstrapConfig(workspace_root=Path("/tmp"), model_name="test")
+ app_state = AppState(turn_count=5)
+ calls = []
+ noop = lambda _: calls.append("called")
+ ctx = ToolUseContext(bootstrap=bc, get_app_state=lambda: app_state, set_app_state=noop)
+ ctx.set_app_state(AppState(turn_count=99))
+        # the stand-in recorder was invoked, yet the parent AppState stayed unmutated (isolation pattern)
+ assert len(calls) == 1
+ assert app_state.turn_count == 5
diff --git a/uv.lock b/uv.lock
index 56c598967..e06391166 100644
--- a/uv.lock
+++ b/uv.lock
@@ -366,6 +366,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/9d/2a/9186535ce58db529927f6cf5990a849aa9e052eea3e2cfefe20b9e1802da/bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952", size = 11508, upload-time = "2025-06-22T19:12:29.781Z" },
]
+[[package]]
+name = "cattrs"
+version = "26.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a0/ec/ba18945e7d6e55a58364d9fb2e46049c1c2998b3d805f19b703f14e81057/cattrs-26.1.0.tar.gz", hash = "sha256:fa239e0f0ec0715ba34852ce813986dfed1e12117e209b816ab87401271cdd40", size = 495672, upload-time = "2026-02-18T22:15:19.406Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/80/56/60547f7801b97c67e97491dc3d9ade9fbccbd0325058fd3dfcb2f5d98d90/cattrs-26.1.0-py3-none-any.whl", hash = "sha256:d1e0804c42639494d469d08d4f26d6b9de9b8ab26b446db7b5f8c2e97f7c3096", size = 73054, upload-time = "2026-02-18T22:15:17.958Z" },
+]
+
[[package]]
name = "certifi"
version = "2026.1.4"
@@ -698,6 +711,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
]
+[[package]]
+name = "docstring-to-markdown"
+version = "0.17"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "importlib-metadata" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/52/d8/8abe80d62c5dce1075578031bcfde07e735bcf0afe2886dd48b470162ab4/docstring_to_markdown-0.17.tar.gz", hash = "sha256:df72a112294c7492487c9da2451cae0faeee06e86008245c188c5761c9590ca3", size = 32260, upload-time = "2025-05-02T15:09:07.932Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/56/7b/af3d0da15bed3a8665419bb3a630585756920f4ad67abfdfef26240ebcc0/docstring_to_markdown-0.17-py3-none-any.whl", hash = "sha256:fd7d5094aa83943bf5f9e1a13701866b7c452eac19765380dead666e36d3711c", size = 23479, upload-time = "2025-05-02T15:09:06.676Z" },
+]
+
[[package]]
name = "duckduckgo-search"
version = "8.1.1"
@@ -1023,6 +1049,34 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]
+[[package]]
+name = "jedi"
+version = "0.19.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "parso" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" },
+]
+
+[[package]]
+name = "jedi-language-server"
+version = "0.41.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cattrs" },
+ { name = "docstring-to-markdown" },
+ { name = "jedi" },
+ { name = "lsprotocol" },
+ { name = "pygls" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f3/34/4a35094c680040c8dd598b1ee9153a701289351c1dcbad1a0f2d196c524b/jedi_language_server-0.41.3.tar.gz", hash = "sha256:113ec22b95fadaceefbb704b5f365384bed296b82ede59026be375ecc97a9f8a", size = 29113, upload-time = "2024-02-26T04:28:05.521Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b6/67/2cf4419a8c418b0e5cba0b43dc1ea33a0bb42907694d6a786a3644889f32/jedi_language_server-0.41.3-py3-none-any.whl", hash = "sha256:7411f7479cdc9e9ea495f91e20b182a5d00170c0a8a4a87d3a147462282c06af", size = 27615, upload-time = "2024-02-26T04:28:02.084Z" },
+]
+
[[package]]
name = "jiter"
version = "0.12.0"
@@ -1339,9 +1393,11 @@ dependencies = [
{ name = "langchain-openai" },
{ name = "langgraph" },
{ name = "langgraph-checkpoint-sqlite" },
+ { name = "multilspy" },
{ name = "pillow" },
{ name = "pydantic" },
{ name = "pyjwt" },
+ { name = "pyright" },
{ name = "pyyaml" },
{ name = "rich" },
{ name = "sse-starlette" },
@@ -1427,6 +1483,7 @@ requires-dist = [
{ name = "langgraph-checkpoint-sqlite", specifier = ">=2.0.0" },
{ name = "langsmith", marker = "extra == 'all'", specifier = ">=0.1.0" },
{ name = "langsmith", marker = "extra == 'langsmith'", specifier = ">=0.1.0" },
+ { name = "multilspy", specifier = ">=0.0.15" },
{ name = "opentelemetry-api", marker = "extra == 'otel'", specifier = ">=1.20.0" },
{ name = "opentelemetry-exporter-otlp", marker = "extra == 'otel'", specifier = ">=1.20.0" },
{ name = "opentelemetry-sdk", marker = "extra == 'otel'", specifier = ">=1.20.0" },
@@ -1436,6 +1493,7 @@ requires-dist = [
{ name = "pymupdf", marker = "extra == 'all'", specifier = ">=1.24.0" },
{ name = "pymupdf", marker = "extra == 'docs'", specifier = ">=1.24.0" },
{ name = "pymupdf", marker = "extra == 'pdf'", specifier = ">=1.24.0" },
+ { name = "pyright", specifier = ">=1.1.0" },
{ name = "python-pptx", marker = "extra == 'all'", specifier = ">=1.0.0" },
{ name = "python-pptx", marker = "extra == 'docs'", specifier = ">=1.0.0" },
{ name = "python-pptx", marker = "extra == 'pptx'", specifier = ">=1.0.0" },
@@ -1473,6 +1531,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" },
]
+[[package]]
+name = "lsprotocol"
+version = "2023.0.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "cattrs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9d/f6/6e80484ec078d0b50699ceb1833597b792a6c695f90c645fbaf54b947e6f/lsprotocol-2023.0.1.tar.gz", hash = "sha256:cc5c15130d2403c18b734304339e51242d3018a05c4f7d0f198ad6e0cd21861d", size = 69434, upload-time = "2024-01-09T17:21:12.625Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8d/37/2351e48cb3309673492d3a8c59d407b75fb6630e560eb27ecd4da03adc9a/lsprotocol-2023.0.1-py3-none-any.whl", hash = "sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2", size = 70826, upload-time = "2024-01-09T17:21:14.491Z" },
+]
+
[[package]]
name = "lxml"
version = "6.0.2"
@@ -1707,6 +1778,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/81/08/7036c080d7117f28a4af526d794aab6a84463126db031b007717c1a6676e/multidict-6.7.1-py3-none-any.whl", hash = "sha256:55d97cc6dae627efa6a6e548885712d4864b81110ac76fa4e534c03819fa4a56", size = 12319, upload-time = "2026-01-26T02:46:44.004Z" },
]
+[[package]]
+name = "multilspy"
+version = "0.0.15"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jedi-language-server" },
+ { name = "psutil" },
+ { name = "requests" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d8/a8/4d6ab48e624f911eb5229aa01b3524b916470c9d036a9e8cc96d6fb81673/multilspy-0.0.15.tar.gz", hash = "sha256:b27a0b7c5c5306216b31fe1df9b4a42d2797735d0a78928e0df9ef8dfbcc97c5", size = 120639, upload-time = "2025-04-03T07:01:27.216Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/97/4d/b9d3492d6a7a2536498fc7fd49c1cc7bc86a41acf93b0ad967d75dbe5cd6/multilspy-0.0.15-py3-none-any.whl", hash = "sha256:3fa88939b953ed5d39aba4688a34105ec1e5cf2b2f778167fee2b78b3c0e1427", size = 137361, upload-time = "2025-04-03T07:01:25.492Z" },
+]
+
[[package]]
name = "multipart"
version = "1.3.0"
@@ -2007,6 +2093,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
]
+[[package]]
+name = "parso"
+version = "0.8.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/81/76/a1e769043c0c0c9fe391b702539d594731a4362334cdf4dc25d0c09761e7/parso-0.8.6.tar.gz", hash = "sha256:2b9a0332696df97d454fa67b81618fd69c35a7b90327cbe6ba5c92d2c68a7bfd", size = 401621, upload-time = "2026-02-09T15:45:24.425Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b6/61/fae042894f4296ec49e3f193aff5d7c18440da9e48102c3315e1bc4519a7/parso-0.8.6-py2.py3-none-any.whl", hash = "sha256:2c549f800b70a5c4952197248825584cb00f033b29c692671d3bf08bf380baff", size = 106894, upload-time = "2026-02-09T15:45:21.391Z" },
+]
+
[[package]]
name = "pillow"
version = "12.1.0"
@@ -2219,6 +2314,34 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" },
]
+[[package]]
+name = "psutil"
+version = "7.2.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" },
+ { url = "https://files.pythonhosted.org/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" },
+ { url = "https://files.pythonhosted.org/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" },
+ { url = "https://files.pythonhosted.org/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" },
+ { url = "https://files.pythonhosted.org/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" },
+ { url = "https://files.pythonhosted.org/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" },
+ { url = "https://files.pythonhosted.org/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" },
+ { url = "https://files.pythonhosted.org/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" },
+]
+
[[package]]
name = "pycparser"
version = "3.0"
@@ -2340,6 +2463,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/9b/4d/b9add7c84060d4c1906abe9a7e5359f2a60f7a9a4f67268b2766673427d8/pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498", size = 15730, upload-time = "2025-03-17T18:53:14.532Z" },
]
+[[package]]
+name = "pygls"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cattrs" },
+ { name = "lsprotocol" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/86/b9/41d173dad9eaa9db9c785a85671fc3d68961f08d67706dc2e79011e10b5c/pygls-1.3.1.tar.gz", hash = "sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018", size = 45527, upload-time = "2024-03-26T18:44:25.679Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/11/19/b74a10dd24548e96e8c80226cbacb28b021bc3a168a7d2709fb0d0185348/pygls-1.3.1-py3-none-any.whl", hash = "sha256:6e00f11efc56321bdeb6eac04f6d86131f654c7d49124344a9ebb968da3dd91e", size = 56031, upload-time = "2024-03-26T18:44:24.249Z" },
+]
+
[[package]]
name = "pygments"
version = "2.19.2"
@@ -2661,7 +2797,7 @@ wheels = [
[[package]]
name = "requests"
-version = "2.32.5"
+version = "2.32.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
@@ -2669,9 +2805,9 @@ dependencies = [
{ name = "idna" },
{ name = "urllib3" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" },
]
[[package]]