diff --git a/Dockerfile b/Dockerfile index d33e1c7..d5b3a65 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Base image for SWE-bench task environments -FROM python:3.11-slim +FROM mcr.microsoft.com/mirror/docker/library/python:3.11-slim # Install comprehensive system dependencies for SWE-bench tasks RUN apt-get update && apt-get install -y --no-install-recommends \ diff --git a/pyproject.toml b/pyproject.toml index 49a8797..7e11873 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,6 +135,8 @@ ignore = [ "tests/**/*.py" = ["S", "T20"] "infrastructure/**/*.py" = ["S603", "S607"] "src/mcpbr/infrastructure/**/*.py" = ["S603", "S607", "S108"] +"src/mcpbr/benchmarks/deadcode.py" = ["S608"] +"src/mcpbr/benchmarks/supermodel/benchmark.py" = ["S608"] "scripts/**/*.py" = ["T20", "S"] [tool.pytest.ini_options] diff --git a/src/mcpbr/__init__.py b/src/mcpbr/__init__.py index 7a5105e..cdb62ef 100644 --- a/src/mcpbr/__init__.py +++ b/src/mcpbr/__init__.py @@ -3,7 +3,7 @@ A benchmark runner for evaluating MCP servers against SWE-bench tasks. 
""" -__version__ = "0.14.0" +__version__ = "0.14.1" from .sdk import ( BenchmarkResult, diff --git a/src/mcpbr/benchmarks/__init__.py b/src/mcpbr/benchmarks/__init__.py index 419ceb9..61e0204 100644 --- a/src/mcpbr/benchmarks/__init__.py +++ b/src/mcpbr/benchmarks/__init__.py @@ -15,6 +15,7 @@ from .codereval import CoderEvalBenchmark from .custom import CustomBenchmark from .cybergym import CyberGymBenchmark +from .deadcode import DeadCodeBenchmark from .gaia import GAIABenchmark from .gsm8k import GSM8KBenchmark from .hellaswag import HellaSwagBenchmark @@ -28,6 +29,7 @@ from .mlagentbench import MLAgentBenchBenchmark from .mmmu import MMMUBenchmark from .repoqa import RepoQABenchmark +from .supermodel.benchmark import SupermodelBenchmark from .swebench import SWEBenchmark from .terminalbench import TerminalBenchBenchmark from .toolbench import ToolBenchBenchmark @@ -50,6 +52,7 @@ "CoderEvalBenchmark", "CustomBenchmark", "CyberGymBenchmark", + "DeadCodeBenchmark", "GAIABenchmark", "GSM8KBenchmark", "HellaSwagBenchmark", @@ -64,6 +67,7 @@ "MMMUBenchmark", "RepoQABenchmark", "SWEBenchmark", + "SupermodelBenchmark", "TerminalBenchBenchmark", "ToolBenchBenchmark", "TruthfulQABenchmark", @@ -106,6 +110,8 @@ "mmmu": MMMUBenchmark, "longbench": LongBenchBenchmark, "adversarial": AdversarialBenchmark, + "dead-code": DeadCodeBenchmark, # type: ignore[dict-item] + "supermodel": SupermodelBenchmark, # type: ignore[dict-item] } diff --git a/src/mcpbr/benchmarks/_bench_utils.py b/src/mcpbr/benchmarks/_bench_utils.py new file mode 100644 index 0000000..5fe8d25 --- /dev/null +++ b/src/mcpbr/benchmarks/_bench_utils.py @@ -0,0 +1,122 @@ +"""Shared utilities for benchmark implementations.""" + +import json +import logging +import subprocess +from pathlib import Path +from typing import Any + +logger = logging.getLogger("mcpbr.benchmarks") + + +def extract_findings_from_text(text: str, findings_key: str = "dead_code") -> list[dict[str, Any]]: + """Extract findings array from 
text/patch content by locating a JSON key. + + Searches for a JSON key (e.g. "dead_code") and extracts the associated array + using bracket-depth matching. Handles brackets inside JSON strings correctly. + + Args: + text: Raw text that may contain a JSON object with the findings key. + findings_key: The JSON key whose array value to extract. + + Returns: + List of finding dicts, or empty list if not found/parseable. + """ + findings: list[dict[str, Any]] = [] + try: + marker = f'"{findings_key}"' + start = text.find(marker) + if start == -1: + return findings + arr_start = text.find("[", start) + if arr_start == -1: + return findings + # Bracket-depth matching that respects JSON strings + depth = 0 + in_string = False + escape_next = False + for i, c in enumerate(text[arr_start:], arr_start): + if escape_next: + escape_next = False + continue + if c == "\\": + if in_string: + escape_next = True + continue + if c == '"': + in_string = not in_string + continue + if in_string: + continue + if c == "[": + depth += 1 + elif c == "]": + depth -= 1 + if depth == 0: + arr_text = text[arr_start : i + 1] + parsed = json.loads(arr_text) + if isinstance(parsed, list): + findings = parsed + break + except (json.JSONDecodeError, ValueError): + pass + return findings + + +def init_git_workdir(host_workdir: str, timeout: int = 30) -> None: + """Initialize a git repo in a workdir so the harness can track modifications. + + Args: + host_workdir: Path to the working directory. + timeout: Timeout in seconds for each git command. 
+ """ + subprocess.run( + ["git", "init"], cwd=host_workdir, capture_output=True, check=False, timeout=timeout + ) + subprocess.run( + ["git", "config", "user.email", "mcpbr@test.com"], + cwd=host_workdir, + capture_output=True, + check=False, + timeout=timeout, + ) + subprocess.run( + ["git", "config", "user.name", "MCPBR"], + cwd=host_workdir, + capture_output=True, + check=False, + timeout=timeout, + ) + subprocess.run( + ["git", "add", "-A"], + cwd=host_workdir, + capture_output=True, + check=False, + timeout=timeout, + ) + subprocess.run( + ["git", "commit", "-m", "Initial"], + cwd=host_workdir, + capture_output=True, + check=False, + timeout=timeout, + ) + + +def safe_write_file(host_workdir: str, file_path: str, content: str) -> None: + """Write a file within host_workdir, raising if the path escapes containment. + + Args: + host_workdir: Root directory that all writes must stay within. + file_path: Relative path of the file to write. + content: File content. + + Raises: + ValueError: If the resolved path is outside host_workdir. 
+ """ + root = Path(host_workdir).resolve() + full_path = (root / file_path).resolve() + if not full_path.is_relative_to(root): + raise ValueError(f"Path traversal detected: {file_path!r} escapes {host_workdir!r}") + full_path.parent.mkdir(parents=True, exist_ok=True) + full_path.write_text(content) diff --git a/src/mcpbr/benchmarks/codegraph.py b/src/mcpbr/benchmarks/codegraph.py index 459a712..5396914 100644 --- a/src/mcpbr/benchmarks/codegraph.py +++ b/src/mcpbr/benchmarks/codegraph.py @@ -15,7 +15,7 @@ import json import logging import re -from datetime import datetime, timezone +from datetime import UTC, datetime from typing import Any from datasets import load_dataset @@ -266,7 +266,7 @@ async def _setup_environment(self, env: TaskEnvironment, task: dict[str, Any]) - "version": 1, "repoName": cache_name, "commitHash": None, - "savedAt": datetime.now(timezone.utc).isoformat(), + "savedAt": datetime.now(UTC).isoformat(), "raw": result, } @@ -382,7 +382,9 @@ def _count_steps(self, text: str) -> int: return 1 # Count tool call patterns in the output - tool_calls = len(re.findall(r"(?:tool_use|tool_call||Tool:|Calling)", text, re.IGNORECASE)) + tool_calls = len( + re.findall(r"(?:tool_use|tool_call||Tool:|Calling)", text, re.IGNORECASE) + ) return max(tool_calls, 1) def get_prebuilt_image(self, task: dict[str, Any]) -> str | None: diff --git a/src/mcpbr/benchmarks/deadcode.py b/src/mcpbr/benchmarks/deadcode.py new file mode 100644 index 0000000..f258a80 --- /dev/null +++ b/src/mcpbr/benchmarks/deadcode.py @@ -0,0 +1,448 @@ +"""Dead code detection benchmark implementation.""" + +import json +import logging +import shutil +import subprocess +import tempfile +from pathlib import Path +from typing import Any + +from ..docker_env import DockerEnvironmentManager, TaskEnvironment +from ._bench_utils import extract_findings_from_text, init_git_workdir, safe_write_file +from .base import BenchmarkTask + +logger = logging.getLogger("mcpbr.deadcode") + +# Corpus repository 
configuration +CORPUS_REPO = "git@github.com:supermodeltools/dead-code-benchmark-corpus.git" +CORPUS_HTTPS = "https://github.com/supermodeltools/dead-code-benchmark-corpus.git" +DEFAULT_CORPUS_CACHE = Path.home() / ".cache" / "mcpbr" / "dead-code-benchmark-corpus" + +# Placeholder content for the report file - agent will modify this +REPORT_PLACEHOLDER = """{ + "dead_code": [], + "analysis_complete": false +} +""" + + +def _clone_or_update_corpus(corpus_path: Path | None = None) -> Path: + """Clone or update the dead-code-benchmark-corpus repository. + + Args: + corpus_path: Optional path to use. If None, uses DEFAULT_CORPUS_CACHE. + + Returns: + Path to the corpus directory. + """ + corpus_dir = corpus_path or DEFAULT_CORPUS_CACHE + corpus_dir.parent.mkdir(parents=True, exist_ok=True) + + if corpus_dir.exists() and (corpus_dir / ".git").exists(): + # Update existing repo + result = subprocess.run( + ["git", "pull", "--quiet"], + cwd=corpus_dir, + capture_output=True, + check=False, + timeout=120, + ) + if result.returncode != 0: + logger.warning("git pull failed for corpus (using stale copy): %s", result.stderr) + else: + # Clone fresh + if corpus_dir.exists(): + shutil.rmtree(corpus_dir) + + # Try SSH first, fall back to HTTPS + result = subprocess.run( + ["git", "clone", "--quiet", CORPUS_REPO, str(corpus_dir)], + capture_output=True, + check=False, + timeout=120, + ) + if result.returncode != 0: + subprocess.run( + ["git", "clone", "--quiet", CORPUS_HTTPS, str(corpus_dir)], + capture_output=True, + check=True, + timeout=120, + ) + + return corpus_dir + + +def _load_corpus_task( + corpus_dir: Path, task_name: str = "typescript-express-app" +) -> dict[str, Any]: + """Load a task from the corpus. + + Args: + corpus_dir: Path to the corpus directory. + task_name: Name of the task (directory name). + + Returns: + Task dictionary with repo_content, dead_code, alive_code. 
+ """ + # Load ground truth + ground_truth_path = corpus_dir / ".benchmark" / f"{task_name}.json" + if not ground_truth_path.exists(): + raise FileNotFoundError(f"Ground truth not found: {ground_truth_path}") + + with open(ground_truth_path) as f: + ground_truth = json.load(f) + + # Load all source files + task_dir = corpus_dir / task_name + src_dir = task_dir / "src" + + repo_content: dict[str, str] = {} + repo_content["REPORT.json"] = REPORT_PLACEHOLDER + + # Walk source files by language + lang = ground_truth["metadata"].get("language", "typescript") + extensions = { + "typescript": ["*.ts", "*.tsx"], + "javascript": ["*.js", "*.jsx", "*.mjs"], + "python": ["*.py"], + } + for pattern in extensions.get(lang, ["*.ts", "*.tsx"]): + for ts_file in src_dir.rglob(pattern): + rel_path = ts_file.relative_to(task_dir) + repo_content[str(rel_path)] = ts_file.read_text() + + # Include package.json and tsconfig if they exist + for config_file in ["package.json", "tsconfig.json"]: + config_path = task_dir / config_file + if config_path.exists(): + repo_content[config_file] = config_path.read_text() + + # Load pre-generated dead code analysis separately (only for MCP agent) + mcp_only_content: dict[str, str] = {} + analysis_path = task_dir / ".supermodel" / "dead-code-analysis.json" + if analysis_path.exists(): + mcp_only_content[".supermodel/dead-code-analysis.json"] = analysis_path.read_text() + + return { + "instance_id": ground_truth["metadata"]["task_id"], + "language": ground_truth["metadata"]["language"], + "difficulty": ground_truth["metadata"].get("difficulty", "hard"), + "repo_content": repo_content, + "mcp_only_content": mcp_only_content, # Only written for MCP agent + "dead_code": ground_truth["dead_code"], + "alive_code": ground_truth["alive_code"], + "metadata": ground_truth["metadata"], + } + + +class DeadCodeBenchmark: + """Dead code detection benchmark.""" + + name = "dead-code" + + def __init__( + self, + dataset: str | Path = "", + corpus_path: str | Path | 
None = None, + resolved_threshold: float = 0.8, + ): + """Initialize the benchmark. + + Args: + dataset: Path to a JSON dataset file (legacy, optional). + corpus_path: Path to cached corpus directory. If None, uses default cache. + resolved_threshold: P/R threshold to consider a task resolved. + """ + self.dataset = dataset + self.corpus_path = Path(corpus_path) if corpus_path else None + self.resolved_threshold = resolved_threshold + self._tasks: list[dict[str, Any]] | None = None + self._corpus_dir: Path | None = None + + def load_tasks( + self, + sample_size: int | None = None, + task_ids: list[str] | None = None, + _level: int | None = None, + filter_difficulty: list[str] | None = None, + filter_category: list[str] | None = None, + filter_tags: list[str] | None = None, + ) -> list[dict[str, Any]]: + """Load and filter dead code benchmark tasks from the corpus. + + Args: + sample_size: Maximum number of tasks to return. + task_ids: Specific task instance IDs to include. + _level: Unused (kept for interface compatibility). + filter_difficulty: Filter by difficulty level (e.g. ['easy', 'hard']). + filter_category: Filter by language category. + filter_tags: Unused tag filter. + + Returns: + List of task dicts with problem_statement and metadata. 
+ """ + _ = filter_tags + tasks = self._load_raw_tasks() + + if task_ids: + task_id_set = set(task_ids) + tasks = [t for t in tasks if t["instance_id"] in task_id_set] + + if filter_difficulty: + difficulty_set = set(filter_difficulty) + tasks = [t for t in tasks if t.get("difficulty", "medium") in difficulty_set] + + if filter_category: + category_set = set(filter_category) + tasks = [t for t in tasks if t.get("language", "python") in category_set] + + if sample_size and len(tasks) > sample_size: + tasks = tasks[:sample_size] + + # Add problem_statement to each task (required by harness) + for task in tasks: + task["problem_statement"] = self._generate_problem_statement(task) + + return tasks + + def _load_raw_tasks(self) -> list[dict[str, Any]]: + if self._tasks is not None: + return self._tasks + + # First check for explicit dataset file + dataset_path = Path(self.dataset) if self.dataset else None + if dataset_path and dataset_path.exists(): + with open(dataset_path) as f: + self._tasks = json.load(f) + return self._tasks + + # Load from corpus repository + try: + self._corpus_dir = _clone_or_update_corpus(self.corpus_path) + task = _load_corpus_task(self._corpus_dir, "typescript-express-app") + self._tasks = [task] + except Exception as e: + # Fall back to error message if corpus unavailable + raise RuntimeError( + f"Failed to load dead code corpus: {e}\n" + "Ensure you have access to the supermodeltools/dead-code-benchmark-corpus repository." + ) from e + + return self._tasks + + def normalize_task(self, task: dict[str, Any]) -> BenchmarkTask: + """Normalize a raw task dict into a BenchmarkTask. + + Args: + task: Raw task dict from load_tasks. + + Returns: + BenchmarkTask with standardized fields. 
+ """ + instance_id = task.get("instance_id", "unknown") + problem_statement = self._generate_problem_statement(task) + + return BenchmarkTask( + task_id=instance_id, + problem_statement=problem_statement, + repo="local/dead-code-detection", + commit="HEAD", + metadata={ + "language": task.get("language", "unknown"), + "difficulty": task.get("difficulty", "medium"), + "dead_code": task.get("dead_code", []), + "alive_code": task.get("alive_code", []), + }, + ) + + def _generate_problem_statement(self, task: dict[str, Any]) -> str: + instance_id = task.get("instance_id", "unknown") + language = task.get("language", "unknown") + metadata = task.get("metadata", {}) + total_files = metadata.get("total_files", len(task.get("repo_content", {}))) + dead_count = metadata.get("dead_functions", len(task.get("dead_code", []))) + + # List files (excluding config files for cleaner output) + files = [ + f + for f in task.get("repo_content", {}) + if f not in ("REPORT.json", "package.json", "tsconfig.json") + and not f.startswith(".supermodel/") + ] + + return f"""Find all dead code in this {language} codebase. + +Task: {instance_id} +Total files: {total_files} +Approximate dead functions: {dead_count} + +Source files to analyze: +{chr(10).join(f" - {f}" for f in sorted(files)[:20])} +{f" ... and {len(files) - 20} more files" if len(files) > 20 else ""} + +INSTRUCTIONS: +1. Read all source files in the workspace +2. Identify entry points (exported functions, route handlers, main modules) +3. Trace the call graph to find which functions are actually reachable +4. Find functions/classes/variables that are NEVER called or referenced + +CRITICAL: Update the existing REPORT.json file with your findings. +Format: a JSON object with "dead_code" array containing objects with file, name, line, and type fields. +Set "analysis_complete" to true when done. 
+ +Rules: +- Exported functions that are used by routes ARE alive (they are entry points) +- Functions called transitively from entry points are NOT dead +- Functions that are only referenced in comments/strings ARE dead +- Only mark truly unreachable code as dead +""" + + async def create_environment( + self, + task: dict[str, Any], + docker_manager: DockerEnvironmentManager, + is_mcp: bool = False, + ) -> TaskEnvironment: + """Create an isolated Docker environment for a dead code detection task. + + Args: + task: Task dict containing repo_content and mcp_only_content. + docker_manager: Docker environment manager for container lifecycle. + is_mcp: If True, include pre-computed analysis files in the workspace. + + Returns: + TaskEnvironment with the workspace mounted and git initialized. + """ + instance_id = task.get("instance_id", "unknown") + repo_content = task.get("repo_content", {}) + mcp_only_content = task.get("mcp_only_content", {}) + + await docker_manager._ensure_fallback_image() + image_name = docker_manager.FALLBACK_IMAGE + + temp_dir = tempfile.TemporaryDirectory(prefix=f"mcpbr_{instance_id}_") + docker_manager._temp_dirs.append(temp_dir) + host_workdir = temp_dir.name + + # Write all files including REPORT.json + for file_path, content in repo_content.items(): + safe_write_file(host_workdir, file_path, content) + + # Write MCP-only files (e.g., pre-computed analysis) only for MCP agent + if is_mcp: + for file_path, content in mcp_only_content.items(): + safe_write_file(host_workdir, file_path, content) + + container_name = f"mcpbr-{docker_manager._session_id}-{instance_id}" + container_workdir = "/workspace" + + container = docker_manager.client.containers.run( + image_name, + command="tail -f /dev/null", + name=container_name, + detach=True, + network_mode="bridge", + volumes={host_workdir: {"bind": "/workspace", "mode": "rw"}}, + working_dir=container_workdir, + remove=False, + labels={ + "mcpbr": "true", + "session_id": docker_manager._session_id, 
+ "instance_id": instance_id, + }, + ) + + docker_manager._containers.append(container) + + env = TaskEnvironment( + container=container, + workdir=container_workdir, + host_workdir=host_workdir, + instance_id=instance_id, + uses_prebuilt=False, + claude_cli_installed=False, + ) + + # Init git so modifications are tracked + init_git_workdir(host_workdir) + + return env + + async def evaluate( + self, + env: TaskEnvironment, + task: dict[str, Any], + solution: str, + ) -> dict[str, Any]: + """Evaluate by reading REPORT.json from the workspace.""" + expected_dead = task.get("dead_code", []) + + # Read REPORT.json from host (faster than docker exec) + report_path = Path(env.host_workdir) / "REPORT.json" + + agent_findings: list[dict[str, Any]] = [] + + if report_path.exists(): + try: + with open(report_path) as f: + report = json.load(f) + agent_findings = report.get("dead_code", []) + except (OSError, json.JSONDecodeError): + # Try parsing from the solution/patch string + agent_findings = self._extract_findings_from_text(solution) + else: + agent_findings = self._extract_findings_from_text(solution) + + # Calculate metrics + found_set = {(f.get("file", ""), f.get("name", "")) for f in agent_findings} + dead_set = {(d.get("file", ""), d.get("name", "")) for d in expected_dead} + + tp = len(found_set & dead_set) + fp = len(found_set - dead_set) + fn = len(dead_set - found_set) + + precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0 + recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0 + f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0 + + resolved = precision >= self.resolved_threshold and recall >= self.resolved_threshold + + # Log results for visibility + print(f"\n{'=' * 50}") + print(f"DEAD CODE EVALUATION - {env.instance_id}") + print(f" Found: {len(agent_findings)} items") + print(f" Expected: {len(expected_dead)} dead functions") + print(f" True Positives: {tp}") + print(f" False Positives: {fp}") + print(f" False Negatives: 
{fn}") + print(f" Precision: {precision * 100:.1f}%") + print(f" Recall: {recall * 100:.1f}%") + print(f" F1 Score: {f1 * 100:.1f}%") + print(f"{'=' * 50}\n") + + return { + "resolved": resolved, + "precision": round(precision, 3), + "recall": round(recall, 3), + "f1_score": round(f1, 3), + "true_positives": tp, + "false_positives": fp, + "false_negatives": fn, + "found": len(agent_findings), + "expected": len(expected_dead), + } + + def _extract_findings_from_text(self, text: str) -> list[dict[str, Any]]: + """Extract findings from text/patch content.""" + return extract_findings_from_text(text, "dead_code") + + def get_prebuilt_image(self, task: dict[str, Any]) -> str | None: + return None + + def get_prompt_template(self) -> str: + return ( + "Analyze the codebase and identify all dead code.\n\n" + "{problem_statement}\n\n" + "Update REPORT.json with your findings." + ) diff --git a/src/mcpbr/benchmarks/supermodel/__init__.py b/src/mcpbr/benchmarks/supermodel/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/mcpbr/benchmarks/supermodel/api_client.py b/src/mcpbr/benchmarks/supermodel/api_client.py new file mode 100644 index 0000000..70836f7 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/api_client.py @@ -0,0 +1,166 @@ +"""Async Supermodel API client with polling and idempotency support.""" + +import asyncio +import hashlib +import json +import logging +import os +import sys +import tempfile +import time + +logger = logging.getLogger("mcpbr.supermodel") + + +async def call_supermodel_api( + endpoint_path: str, + zip_path: str, + api_base: str, + api_key: str | None = None, + idempotency_key: str | None = None, + max_poll_time: int = 600, +) -> dict: + """Call a Supermodel API endpoint with a zipped repo. + + Uses curl subprocess for the HTTP request and polls for async results. + + Args: + endpoint_path: API endpoint path (e.g. '/v1/analysis/dead-code'). + zip_path: Path to the zipped repository archive. 
+ api_base: Base URL for the Supermodel API. + api_key: Optional API key. + idempotency_key: Optional idempotency key (auto-generated from zip hash if not provided). + max_poll_time: Maximum time to poll for results in seconds. + + Returns: + Parsed API response dict. + + Raises: + RuntimeError: If the API request fails or times out. + """ + url = f"{api_base}{endpoint_path}" + + if not idempotency_key: + with open(zip_path, "rb") as f: + zip_hash = hashlib.sha256(f.read()).hexdigest()[:12] + ep_name = endpoint_path.strip("/").replace("/", "-") + idempotency_key = f"bench:{ep_name}:{zip_hash}:v2" + + headers = [ + "-H", + "Accept: application/json", + "-H", + f"Idempotency-Key: {idempotency_key}", + ] + + # Pass API key via curl config file to avoid exposure in process table (ps aux) + api_key_config_path: str | None = None + if api_key: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".cfg", prefix="mcpbr_curl_", delete=False + ) as api_key_fd: + api_key_fd.write(f'header = "X-Api-Key: {api_key}"\n') + api_key_config_path = api_key_fd.name + os.chmod(api_key_config_path, 0o600) + + # Initial request with file upload + upload_cmd = ["curl", "-s", "-X", "POST", url, "-F", f"file=@{zip_path}", *headers] + if api_key_config_path: + upload_cmd.extend(["--config", api_key_config_path]) + + start_time = time.time() + print( + f" Supermodel API: uploading {zip_path} to {endpoint_path}...", file=sys.stderr, flush=True + ) + + proc = await asyncio.create_subprocess_exec( + *upload_cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=180) + + if proc.returncode != 0: + raise RuntimeError(f"Supermodel API request failed: {stderr.decode()}") + + try: + response = json.loads(stdout.decode()) + except json.JSONDecodeError as e: + raise RuntimeError(f"Non-JSON response from Supermodel API: {stdout.decode()[:500]}") from e + + # Poll if async — use lightweight requests (1-byte 
dummy file instead of + # re-uploading the full zip). The API recognizes the idempotency key and + # returns the cached job status without reprocessing. + poll_dummy_path: str | None = None + poll_count = 0 + + try: + while response.get("status") in ("pending", "processing"): + elapsed = time.time() - start_time + if elapsed > max_poll_time: + raise RuntimeError(f"Supermodel API timed out after {max_poll_time}s") + + # Create poll dummy on first iteration only + if poll_dummy_path is None: + with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as poll_dummy: + poll_dummy.write(b"\n") + poll_dummy_path = poll_dummy.name + + poll_cmd = [ + "curl", + "-s", + "-X", + "POST", + url, + "-F", + f"file=@{poll_dummy_path}", + *headers, + ] + if api_key_config_path: + poll_cmd.extend(["--config", api_key_config_path]) + + retry_after = response.get("retryAfter", 10) + poll_count += 1 + print( + f" Supermodel API: {response.get('status')} " + f"(poll #{poll_count}, {elapsed:.0f}s elapsed, retry in {retry_after}s)", + file=sys.stderr, + flush=True, + ) + await asyncio.sleep(retry_after) + + proc = await asyncio.create_subprocess_exec( + *poll_cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=60) + + if proc.returncode != 0: + raise RuntimeError(f"Supermodel API poll failed: {stderr.decode()}") + try: + response = json.loads(stdout.decode()) + except json.JSONDecodeError as e: + raise RuntimeError( + f"Non-JSON poll response from Supermodel API: {stdout.decode()[:500]}" + ) from e + finally: + if poll_dummy_path is not None: + os.unlink(poll_dummy_path) + if api_key_config_path is not None: + os.unlink(api_key_config_path) + + elapsed = time.time() - start_time + + # Check for error responses (status can be string "error" or HTTP status int) + status = response.get("status") + if status == "error" or response.get("error"): + raise RuntimeError( + f"Supermodel API error: 
{response.get('error', response.get('message'))}" + ) + if isinstance(status, int) and status >= 400: + raise RuntimeError(f"Supermodel API HTTP {status}: {response.get('message', response)}") + + api_result = response.get("result", response) + print(f" Supermodel API: completed in {elapsed:.1f}s", file=sys.stderr, flush=True) + return dict(api_result) diff --git a/src/mcpbr/benchmarks/supermodel/benchmark.py b/src/mcpbr/benchmarks/supermodel/benchmark.py new file mode 100644 index 0000000..9a7efe2 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/benchmark.py @@ -0,0 +1,679 @@ +"""SupermodelBenchmark -- PR-based analysis benchmark for mcpbr. + +Supports multiple analysis types (dead-code, impact, test-coverage, circular-deps) +via endpoint plugins. Uses GitHub PRs for ground truth extraction and the Supermodel +API for pre-computed analysis in the enhanced (MCP) condition. +""" + +import atexit +import hashlib +import json +import logging +import os +import shutil +import sys +import tempfile +import traceback +from collections import Counter +from pathlib import Path +from typing import Any + +from ...docker_env import DockerEnvironmentManager, TaskEnvironment +from .._bench_utils import extract_findings_from_text, init_git_workdir +from ..base import BenchmarkTask +from .api_client import call_supermodel_api +from .endpoints import get_endpoint +from .evaluation import compute_prf1 +from .git_utils import clone_repo_at_commit, get_pre_merge_commit, zip_repo + +logger = logging.getLogger("mcpbr.supermodel") + +DEFAULT_GT_DIR = Path.home() / ".cache" / "mcpbr" / "supermodel_ground_truth" + + +class SupermodelBenchmark: + """Supermodel analysis benchmark with PR-based ground truth. + + Implements the mcpbr Benchmark protocol. Each task is a GitHub PR + where the ground truth is extracted from the diff. 
+ """ + + name = "supermodel" + evaluate_without_patch = True # Uses REPORT.json, not git diff + + def __init__( + self, + analysis_type: str = "dead-code", + tasks: list[dict[str, Any]] | None = None, + supermodel_api_base: str = "https://api.supermodel.dev", + supermodel_api_key: str | None = None, + resolved_threshold: float = 0.8, + ground_truth_dir: str | Path | None = None, + supermodel_api_timeout: int = 900, + **kwargs: Any, + ): + """Initialize the Supermodel benchmark. + + Args: + analysis_type: Analysis endpoint to use (dead-code, impact, test-coverage, + circular-deps). + tasks: List of task config dicts from YAML. + supermodel_api_base: Base URL for Supermodel API. + supermodel_api_key: API key (or set SUPERMODEL_API_KEY env var). + resolved_threshold: Recall threshold to consider a task 'resolved' (precision is + reported but not required — the API returns many valid dead-code + candidates beyond the GT set, so precision is not a fair gate). + ground_truth_dir: Directory to cache ground truth JSON files. + supermodel_api_timeout: Max seconds to wait for Supermodel API (default 900). + **kwargs: Additional keyword arguments (ignored for forward compat). 
+ """ + self.analysis_type = analysis_type + self._tasks_config = tasks or [] + self.api_base = supermodel_api_base + self.api_key = supermodel_api_key or os.environ.get("SUPERMODEL_API_KEY") + self.api_timeout = supermodel_api_timeout + self.resolved_threshold = resolved_threshold + self.gt_dir = Path(ground_truth_dir) if ground_truth_dir else DEFAULT_GT_DIR + self.gt_dir.mkdir(parents=True, exist_ok=True) + + self._endpoint = get_endpoint(analysis_type) + self._loaded_tasks: list[dict[str, Any]] | None = None + self._work_dir = Path(tempfile.mkdtemp(prefix="mcpbr_supermodel_")) + atexit.register(self._cleanup_work_dir) + + def load_tasks( + self, + sample_size: int | None = None, + task_ids: list[str] | None = None, + _level: int | None = None, + filter_difficulty: list[str] | None = None, + filter_category: list[str] | None = None, + filter_tags: list[str] | None = None, + ) -> list[dict[str, Any]]: + """Load tasks from config and extract ground truth from PR diffs. + + Ground truth is cached in gt_dir to avoid repeated GitHub API calls. 
+ """ + _ = _level, filter_tags + + tasks = [] + for task_cfg in self._tasks_config: + task_id = task_cfg["id"] + repo = task_cfg.get("repo", "") + language = task_cfg.get("language", "typescript") + scope_prefix = task_cfg.get("scope_prefix") + description = task_cfg.get("description", "") + + # Corpus mode: ground_truth_file points to a pre-existing GT JSON + gt_file = task_cfg.get("ground_truth_file") + if gt_file: + gt_path = Path(gt_file).expanduser() + if gt_path.exists(): + with open(gt_path) as f: + gt = json.load(f) + logger.info(f"Loaded corpus GT: {len(gt)} items from {gt_path}") + else: + logger.warning(f"GT file not found: {gt_path}, skipping {task_id}") + continue + else: + # PR mode: extract from diff + pr_number = task_cfg["pr_number"] + gt = self._load_ground_truth(task_id, repo, pr_number, language, scope_prefix) + + if not gt: + logger.warning(f"No ground truth for {task_id}, skipping") + continue + + task = { + "instance_id": task_id, + "repo": repo, + "pr_number": task_cfg.get("pr_number"), + "merge_commit": task_cfg.get("merge_commit", task_cfg.get("commit", "HEAD")), + "commit": task_cfg.get("commit"), + "clone_url": task_cfg.get("clone_url"), + "language": language, + "scope_prefix": scope_prefix, + "description": description, + "ground_truth": gt, + "problem_statement": self._generate_baseline_problem_statement(task_cfg), + "problem_statement_enhanced": self._generate_enhanced_problem_statement(task_cfg), + "problem_statement_baseline": self._generate_baseline_problem_statement(task_cfg), + "zip_exclude": task_cfg.get("zip_exclude", []), + "cached_analysis": task_cfg.get("cached_analysis"), + } + tasks.append(task) + + if task_ids: + task_id_set = set(task_ids) + tasks = [t for t in tasks if t["instance_id"] in task_id_set] + + if filter_difficulty: + difficulty_set = set(filter_difficulty) + tasks = [t for t in tasks if t.get("difficulty", "hard") in difficulty_set] + + if filter_category: + category_set = set(filter_category) + tasks = [t 
def _load_ground_truth(
    self,
    task_id: str,
    repo: str,
    pr_number: int,
    language: str,
    scope_prefix: str | None,
) -> list[dict]:
    """Return ground truth for *task_id*, using the on-disk cache when present.

    On a cache miss the endpoint plugin extracts ground truth from the PR
    diff and the result is persisted for subsequent runs.
    """
    cache_file = self.gt_dir / f"{self._endpoint.name}_{task_id}.json"

    if cache_file.exists():
        items = json.loads(cache_file.read_text())
        logger.info(f"Loaded cached GT: {len(items)} items from {cache_file}")
        return list(items)

    logger.info(f"Extracting ground truth for {task_id} from PR diff...")
    items = self._endpoint.extract_ground_truth(repo, pr_number, language, scope_prefix)

    cache_file.write_text(json.dumps(items, indent=2))
    logger.info(f"Extracted {len(items)} ground truth items -> {cache_file}")
    return list(items)
+ """ + language = task_cfg.get("language", "typescript") + + ext = ".ts" if language == "typescript" else ".py" + if language == "python": + lang_hints = """- Functions in __all__ that are never actually imported by other modules +- Cleanup/utility functions whose associated state is never populated""" + else: + lang_hints = """- Exported functions/classes that are never imported by any other module +- Middleware or handlers that are defined but never registered with the router +- Methods on classes where the class itself is never instantiated from live code""" + + return f"""You are a code analyst. Find dead code in this {language} codebase. + +Dead code = exported functions, classes, methods, interfaces, and constants that +are defined but never used in any meaningful execution path. + +{lang_hints} + +== STRATEGY == + +STEP 1: Get an overview of the codebase structure. + - List the top-level directories and key source files. + - Identify the main source directories (exclude node_modules, dist, build, tests). + +STEP 2: Scan source files for exported symbols. + - Focus on non-test, non-generated source files. + - For each file, note exported functions, classes, interfaces, constants. + +STEP 3: For each exported symbol, grep the codebase for references. + - If it only appears in its own definition file (and possibly tests or + barrel/index re-exports), it is likely dead. + - Barrel re-exports (index.ts) do NOT count as real usage. + - Type-only imports do NOT count as real usage. + +STEP 4: Write REPORT.json EARLY (after scanning even a few files). + - Write what you have so far, then continue scanning and UPDATE the file. + - This ensures you always produce output even if you run out of iterations. + +REPORT.json format: +{{ + "dead_code": [ + {{"file": "path/to/file{ext}", "name": "unusedFunc", "type": "function", "reason": "no callers found"}}, + ... 
def normalize_task(self, task: dict[str, Any]) -> BenchmarkTask:
    """Convert a raw task dict into the harness's BenchmarkTask record."""
    tid = task.get("instance_id", "unknown")
    extra = {
        "language": task.get("language", "typescript"),
        "analysis_type": self.analysis_type,
        "ground_truth_count": len(task.get("ground_truth", [])),
    }
    return BenchmarkTask(
        task_id=tid,
        problem_statement=task.get("problem_statement", ""),
        repo=task.get("repo", "unknown"),
        commit=task.get("merge_commit", "HEAD"),
        metadata=extra,
    )
+ """ + # Copy to avoid mutating the shared task dict (breaks A/B comparisons) + task = {**task} + task["problem_statement"] = task.get( + "problem_statement_enhanced", task["problem_statement"] + ) + + instance_id = task["instance_id"] + repo = task.get("repo", "") + scope_prefix = task.get("scope_prefix") + + # Clone repo - corpus mode (clone_url + commit) or PR mode (repo + merge_commit) + repo_dir = self._work_dir / f"repo-{instance_id}" + if not repo_dir.exists(): + clone_url = task.get("clone_url") + if clone_url: + # Corpus mode: clone directly at specified commit + commit = task.get("commit", "HEAD") + logger.info(f"Corpus mode: cloning {clone_url} at {commit[:8]}") + await clone_repo_at_commit(clone_url, commit, str(repo_dir)) + else: + # PR mode: get pre-merge commit from merge commit + merge_commit = task["merge_commit"] + pre_merge = await get_pre_merge_commit(repo, merge_commit) + logger.info(f"Pre-merge commit for {instance_id}: {pre_merge[:8]}") + await clone_repo_at_commit(repo, pre_merge, str(repo_dir)) + + # Create Docker environment + await docker_manager._ensure_fallback_image() + image_name = docker_manager.FALLBACK_IMAGE + + temp_dir = tempfile.TemporaryDirectory(prefix=f"mcpbr_{instance_id}_") + docker_manager._temp_dirs.append(temp_dir) + host_workdir = temp_dir.name + + # Copy repo to workdir (scoped if needed) + # ignore_dangling_symlinks: skip broken symlinks (e.g. 
Cal.com .env) + is_corpus = task.get("clone_url") is not None + if scope_prefix: + src_path = repo_dir / scope_prefix + if src_path.is_dir(): + if is_corpus: + # Corpus mode: scoped content goes to workdir root so GT paths match + shutil.copytree( + str(src_path), + host_workdir, + dirs_exist_ok=True, + ignore_dangling_symlinks=True, + ) + else: + # PR mode: preserve directory structure for PR-relative paths + dest_path = Path(host_workdir) / scope_prefix + shutil.copytree( + str(src_path), + str(dest_path), + ignore_dangling_symlinks=True, + ) + else: + shutil.copytree( + str(repo_dir), + host_workdir, + dirs_exist_ok=True, + ignore_dangling_symlinks=True, + ) + else: + shutil.copytree( + str(repo_dir), + host_workdir, + dirs_exist_ok=True, + ignore_dangling_symlinks=True, + ) + + # Write REPORT.json placeholder (key varies by analysis type) + report_path = Path(host_workdir) / "REPORT.json" + report_path.write_text(self._endpoint.report_placeholder()) + + # Place analysis JSON in workdir for the agent + # Priority: 1) cached_analysis file from task config, 2) Supermodel API call + try: + cached_path = task.get("cached_analysis") + if cached_path and Path(cached_path).exists(): + with open(cached_path) as f: + analysis_json = json.load(f) + print( + f" Using cached analysis: {cached_path}", + file=sys.stderr, + flush=True, + ) + else: + exclude_patterns = task.get("zip_exclude", []) + analysis_json = await self._get_analysis( + repo_dir, + instance_id, + scope_prefix, + exclude_patterns, + strip_prefix=is_corpus, + ) + + # --- Build analysis package for agent consumption --- + # Keep reason + confidence so the agent can filter intelligently. + # Also preserve metadata summary and entry points. 
+ keep_fields = {"file", "name", "type", "reason", "confidence"} + + # Find the candidate key + candidate_key = None + for k in ("deadCodeCandidates", "candidates", "items"): + if k in analysis_json: + candidate_key = k + break + + all_candidates = analysis_json.get(candidate_key, []) if candidate_key else [] + + # Extract metadata and entry points early (needed for filtering) + metadata = analysis_json.get("metadata", {}) + + # Pre-filter type/interface candidates (high FP rate from structural typing) + type_interface_reasons = ( + "Type/interface with no references", + "Type with no references", + "Interface with no references", + ) + before_count = len(all_candidates) + all_candidates = [ + c + for c in all_candidates + if not any(str(c.get("reason", "")).startswith(r) for r in type_interface_reasons) + ] + type_filtered = before_count - len(all_candidates) + if type_filtered: + logger.info( + f"Pre-filtered {type_filtered} type/interface candidates for {instance_id}" + ) + + # NOTE: Cannot filter by reason — "Exported but file never imported" + # contains both true positives AND false positives when + # rootFilesCount is high. 16/20 GT items in tyr have this reason. + # The signal is polluted at the parser level (import resolution + # failure tags real dead code with the same reason as framework- + # wired code). See issue #676 for details. 
+ root_files = metadata.get("rootFilesCount", 0) or 0 + + # Build entry point set for cross-reference filtering + ep_set = set() + for ep in analysis_json.get("entryPoints", []): + ep_file = ep.get("file", "") + ep_name = ep.get("name", "") + if ep_file and ep_name: + ep_set.add((ep_file, ep_name)) + + # Drop candidates that match entry points + if ep_set: + before_ep = len(all_candidates) + all_candidates = [ + c + for c in all_candidates + if (c.get("file", ""), c.get("name", "")) not in ep_set + ] + ep_filtered = before_ep - len(all_candidates) + if ep_filtered: + logger.info(f"Filtered {ep_filtered} entry point matches for {instance_id}") + else: + ep_filtered = 0 + + # Slim candidates to keep_fields + slimmed = [{k: v for k, v in c.items() if k in keep_fields} for c in all_candidates] + + # Build metadata summary for the agent + reason_counts = Counter(c.get("reason", "") for c in all_candidates) + confidence_counts = Counter(c.get("confidence", "") for c in all_candidates) + entry_points = analysis_json.get("entryPoints", []) + + metadata_summary = { + "totalCandidates": before_count, + "includedCandidates": len(slimmed), + "prefilteredTypeInterfaces": type_filtered, + "entryPointFiltered": ep_filtered, + "rootFilesCount": root_files, + "reasonBreakdown": dict(reason_counts.most_common()), + "confidenceBreakdown": dict(confidence_counts.most_common()), + } + + # Slim entry points for the whitelist + ep_keep = {"file", "name", "type", "reason"} + slim_entry_points = [ + {k: v for k, v in ep.items() if k in ep_keep} for ep in entry_points[:200] + ] + + # Chunk candidates into files of max 200 each (~150 chars/entry + # with reason+confidence = ~30K chars = ~7.5K tokens per chunk). + # Must stay under 10K token read limit. 
+ max_per_file = 200 + total = len(slimmed) + + base_name = self._endpoint.analysis_filename.replace(".json", "") + chunk_refs = [] + for i in range(0, max(total, 1), max_per_file): + chunk_num = i // max_per_file + 1 + chunk = slimmed[i : i + max_per_file] + if not chunk: + break + chunk_name = f"{base_name}_chunk_{chunk_num:03d}.json" + chunk_path = Path(host_workdir) / chunk_name + + # Per-chunk metadata + chunk_reasons = Counter(c.get("reason", "") for c in chunk) + chunk_data = { + "chunk": chunk_num, + "candidateCount": len(chunk), + "reasonBreakdown": dict(chunk_reasons.most_common()), + "deadCodeCandidates": chunk, + } + chunk_path.write_text(json.dumps(chunk_data, separators=(",", ":"))) + chunk_refs.append( + { + "file": chunk_name, + "candidateCount": len(chunk), + } + ) + + # Write the index file (what the agent reads first) + index_data = { + "metadataSummary": metadata_summary, + "chunkFiles": chunk_refs, + "entryPoints": slim_entry_points, + } + index_path = Path(host_workdir) / self._endpoint.analysis_filename + index_path.write_text(json.dumps(index_data, indent=2)) + + logger.info( + f"Placed analysis for {instance_id}: {total} candidates " + f"in {len(chunk_refs)} chunks, {len(slim_entry_points)} entry points " + f"(filtered: {type_filtered} types, {ep_filtered} entry points)" + ) + except Exception as e: + logger.error(f"Failed to get Supermodel analysis for {instance_id}: {e}") + print( + f"\n*** SUPERMODEL ANALYSIS FAILED for {instance_id} ***\n{traceback.format_exc()}", + file=sys.stderr, + flush=True, + ) + + # Start Docker container + container_name = f"mcpbr-{docker_manager._session_id}-{instance_id}" + container_workdir = "/workspace" + + container = docker_manager.client.containers.run( + image_name, + command="tail -f /dev/null", + name=container_name, + detach=True, + network_mode="bridge", + volumes={host_workdir: {"bind": "/workspace", "mode": "rw"}}, + working_dir=container_workdir, + remove=False, + labels={ + "mcpbr": "true", + 
"session_id": docker_manager._session_id, + "instance_id": instance_id, + }, + ) + + docker_manager._containers.append(container) + + env = TaskEnvironment( + container=container, + workdir=container_workdir, + host_workdir=host_workdir, + instance_id=instance_id, + uses_prebuilt=False, + claude_cli_installed=False, + ) + + # Init git so the harness can track modifications + init_git_workdir(host_workdir) + + return env + + async def _get_analysis( + self, + repo_dir: Path, + task_id: str, + scope_prefix: str | None, + exclude_patterns: list[str] | None = None, + strip_prefix: bool = True, + ) -> dict: + """Call Supermodel API and return parsed/filtered analysis. + + Results are cached in gt_dir/{task_id}_analysis.json keyed by zip hash + so subsequent runs skip the API call. + """ + zip_path = str(self._work_dir / f"{task_id}.zip") + await zip_repo(str(repo_dir), zip_path, scope_prefix, exclude_patterns) + + # Check cache + with open(zip_path, "rb") as f: + zip_hash = hashlib.sha256(f.read()).hexdigest()[:12] + cache_path = self.gt_dir / f"{task_id}_analysis_{zip_hash}.json" + if cache_path.exists(): + logger.info(f"Using cached analysis: {cache_path}") + with open(cache_path) as f: + return dict(json.load(f)) + + raw_response = await call_supermodel_api( + endpoint_path=self._endpoint.api_path, + zip_path=zip_path, + api_base=self.api_base, + api_key=self.api_key, + max_poll_time=self.api_timeout, + ) + + result = self._endpoint.parse_api_response(raw_response) + + # Strip scope_prefix from file paths so they match the workdir layout. + # Only in corpus mode (strip_prefix=True): workdir content is at root. + # In PR mode (strip_prefix=False): scope_prefix dir is preserved in workdir. 
    async def evaluate(
        self,
        env: TaskEnvironment,
        task: dict[str, Any],
        solution: str,
    ) -> dict[str, Any]:
        """Evaluate by reading REPORT.json from the workspace and computing P/R/F1.

        Args:
            env: Task environment; only ``host_workdir`` and ``instance_id`` are read.
            task: Task dict carrying the ``ground_truth`` list.
            solution: Agent output text, used as a fallback findings source.

        Returns:
            Dict with ``resolved`` plus every metric produced by compute_prf1().
        """
        ground_truth = task.get("ground_truth", [])
        key_fields = self._endpoint.key_fields

        # Read REPORT.json from host
        report_path = Path(env.host_workdir) / "REPORT.json"
        agent_findings: list[dict[str, Any]] = []

        if report_path.exists():
            try:
                with open(report_path) as f:
                    report = json.load(f)
                agent_findings = report.get(self._endpoint.findings_key, [])
            # Corrupt/unreadable report: scrape findings from the agent's text.
            except (json.JSONDecodeError, OSError):
                agent_findings = self._extract_findings_from_text(solution)
        else:
            agent_findings = self._extract_findings_from_text(solution)

        # Compute P/R/F1
        metrics = compute_prf1(agent_findings, ground_truth, key_fields)

        precision = metrics["precision"]
        recall = metrics["recall"]
        # Resolution gates on recall only; precision is reported but not gating.
        resolved = recall >= self.resolved_threshold

        # Log results
        print(f"\n{'=' * 50}")
        print(f"SUPERMODEL EVALUATION - {env.instance_id} ({self.analysis_type})")
        print(f" Found: {metrics['found']} items")
        print(f" Expected: {metrics['expected']} items")
        print(f" True Positives: {metrics['true_positives']}")
        print(f" False Positives: {metrics['false_positives']}")
        print(f" False Negatives: {metrics['false_negatives']}")
        print(f" Precision: {precision * 100:.1f}%")
        print(f" Recall: {recall * 100:.1f}%")
        print(f" F1 Score: {metrics['f1_score'] * 100:.1f}%")
        print(f" Resolved: {resolved}")
        print(f"{'=' * 50}\n")

        return {
            "resolved": resolved,
            **metrics,
        }
"resolved": resolved, + **metrics, + } + + def _extract_findings_from_text(self, text: str) -> list[dict[str, Any]]: + """Extract findings from text/patch content as fallback.""" + return extract_findings_from_text(text, self._endpoint.findings_key) + + def get_prebuilt_image(self, task: dict[str, Any]) -> str | None: + return None + + def get_prompt_template(self) -> str: + return "{problem_statement}" diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/__init__.py b/src/mcpbr/benchmarks/supermodel/endpoints/__init__.py new file mode 100644 index 0000000..8158f2f --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/__init__.py @@ -0,0 +1,33 @@ +"""Endpoint plugin registry for Supermodel benchmarks.""" + +from .base import EndpointPlugin +from .circular_deps import CircularDepsPlugin +from .dead_code import DeadCodePlugin +from .impact_analysis import ImpactAnalysisPlugin +from .test_coverage import TestCoveragePlugin + +ENDPOINT_REGISTRY: dict[str, type[EndpointPlugin]] = { + "dead-code": DeadCodePlugin, + "impact": ImpactAnalysisPlugin, + "test-coverage": TestCoveragePlugin, + "circular-deps": CircularDepsPlugin, +} + + +def get_endpoint(name: str) -> EndpointPlugin: + """Get an endpoint plugin instance by name.""" + if name not in ENDPOINT_REGISTRY: + available = ", ".join(ENDPOINT_REGISTRY.keys()) + raise ValueError(f"Unknown endpoint: {name}. 
Available: {available}") + return ENDPOINT_REGISTRY[name]() + + +__all__ = [ + "ENDPOINT_REGISTRY", + "CircularDepsPlugin", + "DeadCodePlugin", + "EndpointPlugin", + "ImpactAnalysisPlugin", + "TestCoveragePlugin", + "get_endpoint", +] diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/base.py b/src/mcpbr/benchmarks/supermodel/endpoints/base.py new file mode 100644 index 0000000..1ae2009 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/base.py @@ -0,0 +1,103 @@ +"""Abstract base class for Supermodel endpoint plugins.""" + +import re +import subprocess +from abc import ABC, abstractmethod + + +class EndpointPlugin(ABC): + """Base class for all endpoint benchmark plugins. + + Each endpoint defines: + - How to call the Supermodel API + - What prompts to give the baseline vs enhanced agent + - How to extract ground truth from a PR + - What tuple format to use for evaluation + """ + + @property + @abstractmethod + def name(self) -> str: + """Short identifier, e.g. 'dead_code'.""" + + @property + @abstractmethod + def api_path(self) -> str: + """API endpoint path, e.g. '/v1/analysis/dead-code'.""" + + @property + @abstractmethod + def baseline_prompt(self) -> str: + """Prompt for the baseline agent (no graph data).""" + + @property + @abstractmethod + def enhanced_prompt(self) -> str: + """Prompt for the graph-enhanced agent.""" + + @property + def analysis_filename(self) -> str: + """Filename for the analysis JSON placed in the workdir.""" + return f"supermodel_{self.name}_analysis.json" + + @property + def findings_key(self) -> str: + """Key in REPORT.json where findings are stored (e.g. 'dead_code').""" + return self.name + + @property + def key_fields(self) -> tuple[str, str]: + """Tuple field names for evaluation set comparison. + + Default: ("file", "name") -- works for dead code, impact analysis, test coverage. + Override for endpoints with different tuple shapes (e.g. circular deps). 
+ """ + return ("file", "name") + + @abstractmethod + def extract_ground_truth( + self, + repo: str, + pr_number: int, + language: str = "typescript", + scope_prefix: str | None = None, + ) -> list[dict]: + """Extract ground truth from a GitHub PR diff. + + Returns a list of dicts with keys matching self.key_fields. + """ + + def report_placeholder(self) -> str: + """Return a JSON placeholder for REPORT.json using this endpoint's findings key.""" + return f'{{\n "{self.findings_key}": [],\n "analysis_complete": false\n}}\n' + + def parse_api_response(self, response: dict) -> dict: + """Transform raw API response into the JSON file placed in workdir for Claude. + + Default: pass through as-is. Override if the API response needs reshaping. + """ + return response + + def scope_prompt(self, prompt: str, scope_prefix: str | None) -> str: + """Append scope context to a prompt if a scope_prefix is set.""" + if scope_prefix: + return prompt + f"\n\nNote: Focus your analysis on the {scope_prefix} directory." 
+ return prompt + + @staticmethod + def get_pr_diff(repo: str, pr_number: int) -> str: + """Fetch a PR diff from GitHub using the gh CLI.""" + result = subprocess.run( + ["gh", "pr", "diff", str(pr_number), "--repo", repo], + capture_output=True, + text=True, + timeout=120, + ) + if result.returncode != 0: + raise RuntimeError(f"Failed to get diff for {repo}#{pr_number}: {result.stderr}") + return result.stdout + + @staticmethod + def should_skip_file(filepath: str, skip_patterns: list[str]) -> bool: + """Check if a file should be skipped based on patterns.""" + return any(re.search(p, filepath) for p in skip_patterns) diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/circular_deps.py b/src/mcpbr/benchmarks/supermodel/endpoints/circular_deps.py new file mode 100644 index 0000000..320a0da --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/circular_deps.py @@ -0,0 +1,26 @@ +"""Stub for circular dependency analysis endpoint plugin.""" + +from .base import EndpointPlugin + + +class CircularDepsPlugin(EndpointPlugin): + """Circular dependency analysis endpoint (stub).""" + + @property + def name(self) -> str: + return "circular_deps" + + @property + def api_path(self) -> str: + return "/v1/analysis/circular-deps" + + @property + def baseline_prompt(self) -> str: + return "Find all circular dependencies in this repository." + + @property + def enhanced_prompt(self) -> str: + return "Using the dependency graph, identify all circular dependencies." + + def extract_ground_truth(self, repo, pr_number, language="typescript", scope_prefix=None): + raise NotImplementedError("CircularDepsPlugin.extract_ground_truth not implemented") diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/dead_code.py b/src/mcpbr/benchmarks/supermodel/endpoints/dead_code.py new file mode 100644 index 0000000..e861709 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/dead_code.py @@ -0,0 +1,287 @@ +"""Dead code detection endpoint plugin. 
# Patterns for TypeScript/JavaScript exported declarations.
# Each entry is (regex, declaration type); the regex targets a removed ('-')
# diff line and captures the declared symbol name in group 1.
TS_PATTERNS = [
    (r"^-\s*export\s+(?:async\s+)?function\s+(\w+)", "function"),
    (r"^-\s*export\s+class\s+(\w+)", "class"),
    (r"^-\s*export\s+const\s+(\w+)\s*[=:]", "const"),
    (r"^-\s*export\s+default\s+(?:async\s+)?function\s+(\w+)", "function"),
    (r"^-\s*export\s+default\s+class\s+(\w+)", "class"),
]

# Patterns for Python declarations (same (regex, type) shape as TS_PATTERNS).
# The last entry matches UPPER_SNAKE_CASE module constants.
PY_PATTERNS = [
    (r"^-\s*def\s+(\w+)\s*[\(\[]", "function"),
    (r"^-\s*async\s+def\s+(\w+)\s*[\(\[]", "function"),
    (r"^-\s*class\s+(\w+)[\s(:\[]", "class"),
    (r"^-\s*(_?[A-Z][A-Z_0-9]+)\s*[=:]", "const"),
]

# File-path regexes for files whose removals never count as dead-code ground
# truth: tests, stories, type declarations, configs, lockfiles, and non-TS/JS
# languages (.cue/.go/.rs).
SKIP_FILE_PATTERNS = [
    r"\.test\.",
    r"\.spec\.(ts|tsx|js|jsx)$",
    r"__tests__/",
    r"test/",
    r"tests/",
    r"\.stories\.",
    r"\.d\.ts$",
    r"__mocks__/",
    r"\.config\.",
    r"package\.json",
    r"package-lock\.json",
    r"tsconfig",
    r"\.cue$",
    r"\.go$",
    r"\.rs$",
]

# Symbol names that are never treated as dead-code candidates: module-system
# builtins and common test-framework identifiers.
SKIP_NAMES = {
    "default",
    "module",
    "exports",
    "require",
    "test",
    "describe",
    "it",
    "expect",
    "beforeEach",
    "afterEach",
    "beforeAll",
    "afterAll",
}


@dataclass
class RemovedDeclaration:
    """A declaration deleted by a PR diff, used as a ground-truth item."""

    # Path of the file the symbol was removed from (b-side of the diff header).
    file: str
    # Captured symbol identifier.
    name: str
    # Declaration kind: "function", "class", or "const".
    type: str
    # The removed source line with the leading '-' stripped.
    line_content: str
+ +Focus on exported symbols -- functions, classes, and constants that are exported but +never imported or used anywhere in the codebase. + +Do NOT include: +- Type definitions, interfaces, or enums (only runtime code) +- Test files or test utilities +- Entry points (main functions, CLI handlers, route handlers) +- Framework lifecycle hooks or decorators + +CRITICAL: Update the existing REPORT.json file with your findings. +Format: a JSON object with "dead_code" array containing objects with file, name, type, and reason. +Set "analysis_complete" to true when done. +""" + + @property + def enhanced_prompt(self) -> str: + return """Read the file supermodel_dead_code_analysis.json in the current directory. + +It contains a pre-computed static analysis. The deadCodeCandidates array lists functions +and classes that are exported but never imported or called anywhere in the codebase. + +Filter out obvious false positives from the candidates: +- Framework lifecycle methods (execute, up, down, Template, etc.) +- Storybook stories and test utilities +- Classes loaded via dependency injection or plugin systems +- Database migration methods + +CRITICAL: Update the existing REPORT.json file with your filtered findings. +Format: a JSON object with "dead_code" array containing objects with file, name, type, and reason. +Set "analysis_complete" to true when done. + +Do NOT search the codebase. Just read the analysis file, filter, and update REPORT.json. +""" + + @property + def enhanced_prompt_v2(self) -> str: + return """You are an expert software architect. A static analyzer has pre-computed dead code +candidates for this codebase. Your job is to FILTER them using the metadata provided. + +STEP 1: Read `supermodel_dead_code_analysis.json`. 
It contains: +- `metadataSummary`: totalCandidates, rootFilesCount, reasonBreakdown, confidenceBreakdown +- `chunkFiles`: list of chunk files with candidate details +- `entryPoints`: symbols confirmed alive — any candidate matching an entry point is a false positive + +If there are chunk files, read ALL of them. + +STEP 2: Understand the analysis quality. +- Check `rootFilesCount` — if it's much higher than expected (>20), the import + resolver likely failed on many files, meaning "file never imported" candidates + have a high false positive rate for framework-wired code. +- Check `reasonBreakdown` to understand where candidates come from. + +STEP 3: Write a script to filter candidates and produce REPORT.json: + +```python +import json, glob + +with open("supermodel_dead_code_analysis.json") as f: + index = json.load(f) + +# Load entry points as a whitelist +entry_set = set() +for ep in index.get("entryPoints", []): + entry_set.add((ep.get("file", ""), ep.get("name", ""))) + +# Load all candidates from chunk files +candidates = [] +for chunk_ref in index.get("chunkFiles", []): + with open(chunk_ref["file"]) as f: + chunk = json.load(f) + candidates.extend(chunk.get("deadCodeCandidates", [])) + +# Filter +dead_code = [] +for c in candidates: + key = (c.get("file", ""), c.get("name", "")) + reason = c.get("reason", "") + confidence = c.get("confidence", "") + + # Drop entry points + if key in entry_set: + continue + + # Drop pure type/interface candidates (high FP rate from structural typing) + if "Type/interface" in reason: + continue + + # Keep everything else — the graph already did import/call analysis + dead_code.append({ + "file": c.get("file", ""), + "name": c.get("name", ""), + "type": c.get("type", "function"), + "reason": reason + }) + +with open("REPORT.json", "w") as f: + json.dump({"dead_code": dead_code, "analysis_complete": True}, f, indent=2) +print(f"Wrote {len(dead_code)} candidates to REPORT.json") +``` + +STEP 4: Run the script, then read 
def parse_api_response(self, response: dict) -> dict:
    """Pre-filter the API response to remove obvious framework false positives.

    Drops candidates whose symbol name or file path matches well-known test,
    storybook, or migration conventions, and records raw/filtered counts in
    the response metadata. The input dict is not mutated.

    Args:
        response: Raw API response dict.

    Returns:
        A shallow-copied response with ``deadCodeCandidates`` filtered.
    """
    name_rx = re.compile(
        r"^(execute|up|down|Template|Story|stories|test|Test|Mock|mock|"
        r"Fixture|Spec|Suite|describe|it|expect|beforeEach|afterEach|"
        r"setUp|tearDown|default|module|exports|require)$"
    )
    file_rx = re.compile(
        r"(\.test\.|\.spec\.|\.stories\.|__tests__|__mocks__|"
        r"\.storybook|\.e2e\.|migrations/|\.d\.ts$)"
    )

    raw_candidates = response.get("deadCodeCandidates", [])
    kept = [
        c
        for c in raw_candidates
        if not name_rx.match(c.get("name", "")) and not file_rx.search(c.get("file", ""))
    ]

    result = dict(response)
    result["deadCodeCandidates"] = kept
    meta = dict(result.get("metadata", {}))
    meta["filteredCount"] = len(kept)
    meta["rawCount"] = len(raw_candidates)
    result["metadata"] = meta
    return result
[{"file": d.file, "name": d.name, "type": d.type} for d in declarations] + + +def _parse_diff(diff_text: str, language: str = "typescript") -> list[RemovedDeclaration]: + patterns = TS_PATTERNS if language == "typescript" else PY_PATTERNS + declarations = [] + current_file = None + seen: set[tuple[str, str]] = set() + + for line in diff_text.split("\n"): + if line.startswith("diff --git"): + parts = line.split(" b/") + if len(parts) >= 2: + current_file = parts[-1] + continue + + if not line.startswith("-") or line.startswith("---"): + continue + if current_file is None: + continue + if EndpointPlugin.should_skip_file(current_file, SKIP_FILE_PATTERNS): + continue + + for pattern, decl_type in patterns: + match = re.match(pattern, line) + if match: + name = match.group(1) + if name in SKIP_NAMES: + continue + key = (current_file, name) + if key not in seen: + seen.add(key) + declarations.append( + RemovedDeclaration( + file=current_file, + name=name, + type=decl_type, + line_content=line.lstrip("-").strip(), + ) + ) + break + + return declarations diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/impact_analysis.py b/src/mcpbr/benchmarks/supermodel/endpoints/impact_analysis.py new file mode 100644 index 0000000..bdac795 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/impact_analysis.py @@ -0,0 +1,26 @@ +"""Stub for impact analysis endpoint plugin.""" + +from .base import EndpointPlugin + + +class ImpactAnalysisPlugin(EndpointPlugin): + """Impact analysis endpoint (stub).""" + + @property + def name(self) -> str: + return "impact_analysis" + + @property + def api_path(self) -> str: + return "/v1/analysis/impact" + + @property + def baseline_prompt(self) -> str: + return "Analyze the impact of changes in this repository." + + @property + def enhanced_prompt(self) -> str: + return "Using the dependency graph, analyze the impact of changes." 
+ + def extract_ground_truth(self, repo, pr_number, language="typescript", scope_prefix=None): + raise NotImplementedError("ImpactAnalysisPlugin.extract_ground_truth not implemented") diff --git a/src/mcpbr/benchmarks/supermodel/endpoints/test_coverage.py b/src/mcpbr/benchmarks/supermodel/endpoints/test_coverage.py new file mode 100644 index 0000000..210e85f --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/endpoints/test_coverage.py @@ -0,0 +1,26 @@ +"""Stub for test coverage endpoint plugin.""" + +from .base import EndpointPlugin + + +class TestCoveragePlugin(EndpointPlugin): + """Test coverage analysis endpoint (stub).""" + + @property + def name(self) -> str: + return "test_coverage" + + @property + def api_path(self) -> str: + return "/v1/analysis/test-coverage" + + @property + def baseline_prompt(self) -> str: + return "Identify untested code in this repository." + + @property + def enhanced_prompt(self) -> str: + return "Using the dependency graph, identify untested code." + + def extract_ground_truth(self, repo, pr_number, language="typescript", scope_prefix=None): + raise NotImplementedError("TestCoveragePlugin.extract_ground_truth not implemented") diff --git a/src/mcpbr/benchmarks/supermodel/evaluation.py b/src/mcpbr/benchmarks/supermodel/evaluation.py new file mode 100644 index 0000000..6634cd3 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/evaluation.py @@ -0,0 +1,85 @@ +"""P/R/F1 set-based evaluation for Supermodel benchmarks.""" + +import logging + +logger = logging.getLogger("mcpbr.supermodel") + + +def normalize_path(filepath: str) -> str: + """Normalize file path for comparison.""" + p = filepath.replace("\\", "/") + while p.startswith("./"): + p = p[2:] + p = p.lstrip("/") + return p + + +def normalize_name(name: str) -> str: + """Normalize symbol name for comparison.""" + return name.strip() + + +def build_comparison_set( + items: list[dict], + key_fields: tuple[str, str] = ("file", "name"), +) -> set[tuple[str, str]]: + """Build a set of 
normalized tuples from prediction/ground truth items. + + Args: + items: List of dicts with the key fields. + key_fields: Tuple of (field_a, field_b) to extract. + + Returns: + Set of normalized (field_a_value, field_b_value) tuples. + """ + result = set() + fa, fb = key_fields + path_like_fields = {"file", "module_a", "module_b"} + for item in items: + raw_a = item.get(fa, "") + raw_b = item.get(fb, "") + a = normalize_path(raw_a) if fa in path_like_fields else normalize_name(raw_a) + b = normalize_path(raw_b) if fb in path_like_fields else normalize_name(raw_b) + if a and b: + result.add((a, b)) + elif items: + logger.debug("Dropped item with empty field: %s=%r, %s=%r", fa, raw_a, fb, raw_b) + return result + + +def compute_prf1( + predictions: list[dict], + ground_truth: list[dict], + key_fields: tuple[str, str] = ("file", "name"), +) -> dict: + """Compute precision, recall, F1 from predictions vs ground truth. + + Args: + predictions: List of prediction dicts. + ground_truth: List of ground truth dicts. + key_fields: Fields to use for set comparison. + + Returns: + Dict with precision, recall, f1_score, tp, fp, fn counts, and resolved boolean. 
+ """ + pred_set = build_comparison_set(predictions, key_fields) + gt_set = build_comparison_set(ground_truth, key_fields) + + tp = len(pred_set & gt_set) + fp = len(pred_set - gt_set) + fn = len(gt_set - pred_set) + + precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0 + recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0 + f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0 + + return { + "precision": round(precision, 3), + "recall": round(recall, 3), + "f1_score": round(f1, 3), + "true_positives": tp, + "false_positives": fp, + "false_negatives": fn, + "found": len(pred_set), + "expected": len(gt_set), + } diff --git a/src/mcpbr/benchmarks/supermodel/git_utils.py b/src/mcpbr/benchmarks/supermodel/git_utils.py new file mode 100644 index 0000000..f75c5b3 --- /dev/null +++ b/src/mcpbr/benchmarks/supermodel/git_utils.py @@ -0,0 +1,194 @@ +"""Git utilities for cloning repos and creating zip archives.""" + +import asyncio +import logging + +logger = logging.getLogger("mcpbr.supermodel") + + +async def clone_repo_at_commit(repo: str, commit: str, dest: str) -> None: + """Clone a repo and checkout a specific commit. + + Args: + repo: GitHub repo in 'owner/name' format, or a full clone URL. + commit: Git commit SHA to checkout. + dest: Destination directory path. 
+ """ + logger.info(f"Cloning {repo} at {commit[:8]} -> {dest}") + + # Support full URLs (https://, git://, ssh://) or owner/name shorthand + if repo.startswith(("https://", "http://", "git://", "ssh://", "git@")): + clone_url = repo + else: + clone_url = f"https://github.com/{repo}.git" + + proc = await asyncio.create_subprocess_exec( + "git", + "clone", + "--quiet", + "--depth", + "1", + clone_url, + dest, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + _, stderr = await asyncio.wait_for(proc.communicate(), timeout=300) + if proc.returncode != 0: + raise RuntimeError(f"Clone failed: {stderr.decode()}") + + proc = await asyncio.create_subprocess_exec( + "git", + "fetch", + "--quiet", + "--depth", + "1", + "origin", + commit, + cwd=dest, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + _, stderr = await asyncio.wait_for(proc.communicate(), timeout=300) + if proc.returncode != 0: + raise RuntimeError(f"Fetch failed: {stderr.decode()}") + + proc = await asyncio.create_subprocess_exec( + "git", + "checkout", + "--quiet", + commit, + cwd=dest, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + _, stderr = await asyncio.wait_for(proc.communicate(), timeout=60) + if proc.returncode != 0: + raise RuntimeError(f"Checkout failed: {stderr.decode()}") + + +async def get_pre_merge_commit(repo: str, merge_commit: str) -> str: + """Get the first parent of a merge commit (pre-merge state). + + Args: + repo: GitHub repo in 'owner/name' format. + merge_commit: Merge commit SHA. + + Returns: + SHA of the first parent commit. 
+ """ + proc = await asyncio.create_subprocess_exec( + "gh", + "api", + f"repos/{repo}/commits/{merge_commit}", + "--jq", + ".parents[0].sha", + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=30) + if proc.returncode != 0: + raise RuntimeError(f"Failed to get parent of {merge_commit}: {stderr.decode()}") + return stdout.decode().strip() + + +async def zip_repo( + repo_dir: str, + output_zip: str, + scope_prefix: str | None = None, + exclude_patterns: list[str] | None = None, +) -> str: + """Create a zip of the repo for Supermodel API. + + Uses ``git archive`` when possible (recommended by Supermodel docs) since it + only includes tracked files and automatically respects .gitignore. Falls back + to ``zip -r`` with exclude patterns for non-git directories. + + Args: + repo_dir: Path to the repository directory. + output_zip: Path for the output zip file. + scope_prefix: Optional subdirectory to scope the archive to. + exclude_patterns: Optional glob patterns to exclude (e.g. ["loc/*", "lib/*"]). + + Returns: + Path to the created zip file. 
+ """ + import os + + is_git = os.path.isdir(os.path.join(repo_dir, ".git")) + + if is_git: + return await _zip_repo_git_archive(repo_dir, output_zip, scope_prefix) + else: + return await _zip_repo_fallback(repo_dir, output_zip, scope_prefix, exclude_patterns) + + +async def _zip_repo_git_archive( + repo_dir: str, + output_zip: str, + scope_prefix: str | None = None, +) -> str: + """Create zip using ``git archive`` — only includes tracked files.""" + cmd = ["git", "archive", "--format=zip", "-o", output_zip, "HEAD"] + if scope_prefix: + cmd.append(scope_prefix) + + proc = await asyncio.create_subprocess_exec( + *cmd, + cwd=repo_dir, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + _, stderr = await asyncio.wait_for(proc.communicate(), timeout=120) + if proc.returncode != 0: + raise RuntimeError(f"git archive failed: {stderr.decode()}") + return output_zip + + +async def _zip_repo_fallback( + repo_dir: str, + output_zip: str, + scope_prefix: str | None = None, + exclude_patterns: list[str] | None = None, +) -> str: + """Fallback: create zip using ``zip -r`` with exclude patterns.""" + zip_target = scope_prefix if scope_prefix else "." 
+ base_excludes = [ + "node_modules/*", + ".git/*", + "dist/*", + "build/*", + "target/*", + ".next/*", + "__pycache__/*", + "*.pyc", + "venv/*", + ".venv/*", + "vendor/*", + ".idea/*", + ".vscode/*", + "coverage/*", + ".nyc_output/*", + ] + # Prepend scope_prefix to exclude patterns so they match archive paths + prefixed_excludes = [] + for pattern in base_excludes + (exclude_patterns or []): + if scope_prefix and not pattern.startswith(scope_prefix): + prefixed_excludes.append(f"{scope_prefix}/{pattern}") + else: + prefixed_excludes.append(pattern) + + cmd = ["zip", "-r", "-q", output_zip, zip_target] + for pattern in prefixed_excludes: + cmd.extend(["-x", pattern]) + + proc = await asyncio.create_subprocess_exec( + *cmd, + cwd=repo_dir, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + _, stderr = await asyncio.wait_for(proc.communicate(), timeout=120) + if proc.returncode != 0: + raise RuntimeError(f"zip failed: {stderr.decode()}") + return output_zip diff --git a/src/mcpbr/config.py b/src/mcpbr/config.py index 132d3e9..1ae4f5b 100644 --- a/src/mcpbr/config.py +++ b/src/mcpbr/config.py @@ -47,6 +47,8 @@ "longbench", "adversarial", "codegraph", + "dead-code", + "supermodel", ) VALID_INFRASTRUCTURE_MODES = ("local", "azure", "aws", "gcp", "kubernetes", "cloudflare") @@ -872,6 +874,44 @@ def validate_thinking_budget(cls, v: int | None) -> int | None: description="Send progress notification every N minutes (0 = disabled).", ) + # --- Supermodel Benchmark --- + analysis_type: str | None = Field( + default=None, + description="Supermodel analysis type (dead-code, impact, test-coverage, circular-deps)", + ) + + tasks: list[dict[str, Any]] | None = Field( + default=None, + description="Task definitions for supermodel benchmark (repos, PRs, commits)", + ) + + supermodel_api_base: str = Field( + default="https://api.supermodel.dev", + description="Base URL for the Supermodel API", + ) + + supermodel_api_key: str | None = Field( + default=None, + 
description="API key for Supermodel API (or use SUPERMODEL_API_KEY env var)", + ) + + supermodel_api_timeout: int = Field( + default=900, + description="Max seconds to wait for Supermodel API analysis to complete", + ) + + resolved_threshold: float = Field( + default=0.8, + ge=0.0, + le=1.0, + description="Recall threshold to consider a task resolved (must be in [0.0, 1.0])", + ) + + ground_truth_dir: str | None = Field( + default=None, + description="Directory to cache ground truth JSON files", + ) + @field_validator("notify_progress_interval", "notify_progress_time_minutes") @classmethod def validate_notify_progress_intervals(cls, v: int) -> int: diff --git a/src/mcpbr/docker_env.py b/src/mcpbr/docker_env.py index 2f0863a..c234315 100644 --- a/src/mcpbr/docker_env.py +++ b/src/mcpbr/docker_env.py @@ -462,7 +462,9 @@ def _build() -> None: rm=True, ) else: - self.client.images.pull("python:3.11-slim") + self.client.images.pull( + "mcr.microsoft.com/mirror/docker/library/python:3.11-slim" + ) self._use_fallback_image() loop = asyncio.get_event_loop() diff --git a/src/mcpbr/harness.py b/src/mcpbr/harness.py index 1dfd60a..21f1e06 100644 --- a/src/mcpbr/harness.py +++ b/src/mcpbr/harness.py @@ -1204,6 +1204,15 @@ async def run_evaluation( benchmark_kwargs: dict[str, Any] = {} if config.benchmark == "cybergym": benchmark_kwargs["level"] = config.cybergym_level + elif config.benchmark in ("dead-code", "supermodel"): + benchmark_kwargs["resolved_threshold"] = config.resolved_threshold + if config.benchmark == "supermodel": + benchmark_kwargs["analysis_type"] = config.analysis_type or "dead-code" + benchmark_kwargs["tasks"] = config.tasks + benchmark_kwargs["supermodel_api_base"] = config.supermodel_api_base + benchmark_kwargs["supermodel_api_key"] = config.supermodel_api_key + benchmark_kwargs["supermodel_api_timeout"] = config.supermodel_api_timeout + benchmark_kwargs["ground_truth_dir"] = config.ground_truth_dir benchmark = create_benchmark(config.benchmark, 
**benchmark_kwargs) diff --git a/src/mcpbr/models.py b/src/mcpbr/models.py index 32daf7f..4240646 100644 --- a/src/mcpbr/models.py +++ b/src/mcpbr/models.py @@ -17,7 +17,22 @@ class ModelInfo: SUPPORTED_MODELS: dict[str, ModelInfo] = { # Anthropic models via Claude Code CLI - # Claude 4.5 models (latest) + # Claude 4.6 models (latest) + "claude-opus-4-6": ModelInfo( + id="claude-opus-4-6", + provider="Anthropic", + display_name="Claude Opus 4.6", + context_window=200000, + notes="Most capable Claude model (March 2026)", + ), + "claude-sonnet-4-6": ModelInfo( + id="claude-sonnet-4-6", + provider="Anthropic", + display_name="Claude Sonnet 4.6", + context_window=200000, + notes="Balanced performance and cost (March 2026)", + ), + # Claude 4.5 models "claude-opus-4-5-20251101": ModelInfo( id="claude-opus-4-5-20251101", provider="Anthropic", diff --git a/src/mcpbr/pricing.py b/src/mcpbr/pricing.py index 8e263d4..21f84b1 100644 --- a/src/mcpbr/pricing.py +++ b/src/mcpbr/pricing.py @@ -31,7 +31,28 @@ class ModelPricing: # Model pricing database (as of January 2026) # Prices are per million tokens (MTok) MODEL_PRICING: dict[str, ModelPricing] = { - # Claude 4.5 Series (Latest - 2026) + # Claude 4.6 Series (Latest - March 2026) + "claude-opus-4-6": ModelPricing( + model_id="claude-opus-4-6", + provider="Anthropic", + input_price_per_mtok=5.00, + output_price_per_mtok=25.00, + supports_prompt_caching=True, + cache_creation_price_per_mtok=6.25, + cache_read_price_per_mtok=0.50, + notes="Most capable Claude 4.6 model", + ), + "claude-sonnet-4-6": ModelPricing( + model_id="claude-sonnet-4-6", + provider="Anthropic", + input_price_per_mtok=3.00, + output_price_per_mtok=15.00, + supports_prompt_caching=True, + cache_creation_price_per_mtok=3.75, + cache_read_price_per_mtok=0.30, + notes="Balanced Claude 4.6 model", + ), + # Claude 4.5 Series "claude-opus-4-5-20251101": ModelPricing( model_id="claude-opus-4-5-20251101", provider="Anthropic", diff --git a/uv.lock b/uv.lock index 
ef2aaf8..bfeb7e8 100644 --- a/uv.lock +++ b/uv.lock @@ -190,29 +190,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, ] -[[package]] -name = "babel" -version = "2.17.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, -] - -[[package]] -name = "backrefs" -version = "6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/86/e3/bb3a439d5cb255c4774724810ad8073830fac9c9dee123555820c1bcc806/backrefs-6.1.tar.gz", hash = "sha256:3bba1749aafe1db9b915f00e0dd166cba613b6f788ffd63060ac3485dc9be231", size = 7011962, upload-time = "2025-11-15T14:52:08.323Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3b/ee/c216d52f58ea75b5e1841022bbae24438b19834a29b163cb32aa3a2a7c6e/backrefs-6.1-py310-none-any.whl", hash = "sha256:2a2ccb96302337ce61ee4717ceacfbf26ba4efb1d55af86564b8bbaeda39cac1", size = 381059, upload-time = "2025-11-15T14:51:59.758Z" }, - { url = "https://files.pythonhosted.org/packages/e6/9a/8da246d988ded941da96c7ed945d63e94a445637eaad985a0ed88787cb89/backrefs-6.1-py311-none-any.whl", hash = "sha256:e82bba3875ee4430f4de4b6db19429a27275d95a5f3773c57e9e18abc23fd2b7", 
size = 392854, upload-time = "2025-11-15T14:52:01.194Z" }, - { url = "https://files.pythonhosted.org/packages/37/c9/fd117a6f9300c62bbc33bc337fd2b3c6bfe28b6e9701de336b52d7a797ad/backrefs-6.1-py312-none-any.whl", hash = "sha256:c64698c8d2269343d88947c0735cb4b78745bd3ba590e10313fbf3f78c34da5a", size = 398770, upload-time = "2025-11-15T14:52:02.584Z" }, - { url = "https://files.pythonhosted.org/packages/eb/95/7118e935b0b0bd3f94dfec2d852fd4e4f4f9757bdb49850519acd245cd3a/backrefs-6.1-py313-none-any.whl", hash = "sha256:4c9d3dc1e2e558965202c012304f33d4e0e477e1c103663fd2c3cc9bb18b0d05", size = 400726, upload-time = "2025-11-15T14:52:04.093Z" }, - { url = "https://files.pythonhosted.org/packages/1d/72/6296bad135bfafd3254ae3648cd152980a424bd6fed64a101af00cc7ba31/backrefs-6.1-py314-none-any.whl", hash = "sha256:13eafbc9ccd5222e9c1f0bec563e6d2a6d21514962f11e7fc79872fd56cbc853", size = 412584, upload-time = "2025-11-15T14:52:05.233Z" }, - { url = "https://files.pythonhosted.org/packages/02/e3/a4fa1946722c4c7b063cc25043a12d9ce9b4323777f89643be74cef2993c/backrefs-6.1-py39-none-any.whl", hash = "sha256:a9e99b8a4867852cad177a6430e31b0f6e495d65f8c6c134b68c14c3c95bf4b0", size = 381058, upload-time = "2025-11-15T14:52:06.698Z" }, -] - [[package]] name = "bcrypt" version = "5.0.0" @@ -524,12 +501,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bc/58/6b3d24e6b9bc474a2dcdee65dfd1f008867015408a271562e4b690561a4d/cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7", size = 3407605, upload-time = "2026-02-10T19:18:29.233Z" }, ] -[[package]] -name = "csscompressor" -version = "0.9.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/2a/8c3ac3d8bc94e6de8d7ae270bb5bc437b210bb9d6d9e46630c98f4abd20c/csscompressor-0.9.5.tar.gz", hash = "sha256:afa22badbcf3120a4f392e4d22f9fff485c044a1feda4a950ecc5eba9dd31a05", size = 237808, upload-time = 
"2017-11-26T21:13:08.238Z" } - [[package]] name = "datasets" version = "4.5.0" @@ -733,18 +704,6 @@ http = [ { name = "aiohttp" }, ] -[[package]] -name = "ghp-import" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "python-dateutil" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343", size = 10943, upload-time = "2022-05-02T15:47:16.11Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034, upload-time = "2022-05-02T15:47:14.552Z" }, -] - [[package]] name = "gitdb" version = "4.0.12" @@ -879,18 +838,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] -[[package]] -name = "griffe" -version = "1.15.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9c/83/3b1d03d36f224edded98e9affd0467630fc09d766c0e56fb1498cbb04a9b/griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3", size = 150705, upload-time = 
"2025-11-10T15:03:13.549Z" }, -] - [[package]] name = "grpcio" version = "1.78.0" @@ -994,14 +941,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/44/870d44b30e1dcfb6a65932e3e1506c103a8a5aea9103c337e7a53180322c/hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69", size = 2905735, upload-time = "2025-10-24T19:04:35.928Z" }, ] -[[package]] -name = "htmlmin2" -version = "0.1.13" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/31/a76f4bfa885f93b8167cb4c85cf32b54d1f64384d0b897d45bc6d19b7b45/htmlmin2-0.1.13-py3-none-any.whl", hash = "sha256:75609f2a42e64f7ce57dbff28a39890363bde9e7e5885db633317efbdf8c79a2", size = 34486, upload-time = "2023-03-14T21:28:30.388Z" }, -] - [[package]] name = "httpcore" version = "1.0.9" @@ -1108,18 +1047,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, ] -[[package]] -name = "jinja2" -version = "3.1.6" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markupsafe" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, -] - [[package]] name = "jiter" version = "0.12.0" @@ -1205,12 
+1132,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" }, ] -[[package]] -name = "jsmin" -version = "3.0.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5e/73/e01e4c5e11ad0494f4407a3f623ad4d87714909f50b17a06ed121034ff6e/jsmin-3.0.1.tar.gz", hash = "sha256:c0959a121ef94542e807a674142606f7e90214a2b3d1eb17300244bbb5cc2bfc", size = 13925, upload-time = "2022-01-16T20:35:59.13Z" } - [[package]] name = "jsonschema" version = "4.26.0" @@ -1311,15 +1232,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/d1/433b3c06e78f23486fe4fdd19bc134657eb30997d2054b0dbf52bbf3382e/librt-0.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:92249938ab744a5890580d3cb2b22042f0dce71cdaa7c1369823df62bedf7cbc", size = 48753, upload-time = "2026-02-12T14:53:38.539Z" }, ] -[[package]] -name = "markdown" -version = "3.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b7/b1/af95bcae8549f1f3fd70faacb29075826a0d689a27f232e8cee315efa053/markdown-3.10.1.tar.gz", hash = "sha256:1c19c10bd5c14ac948c53d0d762a04e2fa35a6d58a6b7b1e6bfcbe6fefc0001a", size = 365402, upload-time = "2026-01-21T18:09:28.206Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/59/1b/6ef961f543593969d25b2afe57a3564200280528caa9bd1082eecdd7b3bc/markdown-3.10.1-py3-none-any.whl", hash = "sha256:867d788939fe33e4b736426f5b9f651ad0c0ae0ecf89df0ca5d1176c70812fe3", size = 107684, upload-time = "2026-01-21T18:09:27.203Z" }, -] - [[package]] name = "markdown-it-py" version = "4.0.0" @@ -1332,80 +1244,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, ] -[[package]] -name = "markupsafe" -version = "3.0.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631, upload-time = "2025-09-27T18:36:18.185Z" }, - { url = "https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058, upload-time = "2025-09-27T18:36:19.444Z" }, - { url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287, upload-time = "2025-09-27T18:36:20.768Z" }, - { url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940, 
upload-time = "2025-09-27T18:36:22.249Z" }, - { url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887, upload-time = "2025-09-27T18:36:23.535Z" }, - { url = "https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692, upload-time = "2025-09-27T18:36:24.823Z" }, - { url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471, upload-time = "2025-09-27T18:36:25.95Z" }, - { url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 22923, upload-time = "2025-09-27T18:36:27.109Z" }, - { url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572, upload-time = "2025-09-27T18:36:28.045Z" }, - { url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077, upload-time = "2025-09-27T18:36:29.025Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876, upload-time = "2025-09-27T18:36:29.954Z" }, - { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, - { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, - { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, - { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, - { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, - { url = 
"https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" }, - { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, - { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, - { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, - { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, - { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, - { url = 
"https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, - { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, - { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, - { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, - { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, - { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, - { url = 
"https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, - { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, - { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, - { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, - { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, - { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, - { url = 
"https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, - { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, - { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, - { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, - { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, - { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, - { 
url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, - { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, - { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, - { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, - { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, - { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, - { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, - { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, - { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, - { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, - { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, - { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, - { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, - { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, - { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, - { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, - { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, - { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, - { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, - { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, - { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, - { url = 
"https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, - { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, -] - [[package]] name = "mcp" version = "1.26.0" @@ -1433,7 +1271,7 @@ wheels = [ [[package]] name = "mcpbr" -version = "0.14.0" +version = "0.14.1" source = { editable = "." } dependencies = [ { name = "anthropic" }, @@ -1468,12 +1306,6 @@ dev = [ { name = "types-pyyaml" }, { name = "types-requests" }, ] -docs = [ - { name = "mkdocs" }, - { name = "mkdocs-material" }, - { name = "mkdocs-minify-plugin" }, - { name = "mkdocstrings", extra = ["python"] }, -] gemini = [ { name = "google-generativeai" }, ] @@ -1496,10 +1328,6 @@ requires-dist = [ { name = "google-generativeai", marker = "extra == 'all-providers'", specifier = ">=0.3.0" }, { name = "google-generativeai", marker = "extra == 'gemini'", specifier = ">=0.3.0" }, { name = "mcp", specifier = ">=1.0.0" }, - { name = "mkdocs", marker = "extra == 'docs'", specifier = ">=1.5.0" }, - { name = "mkdocs-material", marker = "extra == 'docs'", specifier = ">=9.5.0" }, - { name = "mkdocs-minify-plugin", marker = "extra == 'docs'", specifier = ">=0.7.0" }, - { name = "mkdocstrings", extras = ["python"], marker = "extra == 'docs'", specifier = ">=0.24.0" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.0" }, { name = "openai", marker = "extra == 'all-providers'", specifier = ">=1.0.0" }, { name = "openai", marker = "extra == 'openai'", specifier = ">=1.0.0" }, @@ -1534,149 +1362,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, ] -[[package]] -name = "mergedeep" -version = "1.3.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661, upload-time = "2021-02-05T18:55:30.623Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354, upload-time = "2021-02-05T18:55:29.583Z" }, -] - -[[package]] -name = "mkdocs" -version = "1.6.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "ghp-import" }, - { name = "jinja2" }, - { name = "markdown" }, - { name = "markupsafe" }, - { name = "mergedeep" }, - { name = "mkdocs-get-deps" }, - { name = "packaging" }, - { name = "pathspec" }, - { name = "pyyaml" }, - { name = "pyyaml-env-tag" }, - { name = "watchdog" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/bc/c6/bbd4f061bd16b378247f12953ffcb04786a618ce5e904b8c5a01a0309061/mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2", size = 3889159, upload-time = "2024-08-30T12:24:06.899Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/22/5b/dbc6a8cddc9cfa9c4971d59fb12bb8d42e161b7e7f8cc89e49137c5b279c/mkdocs-1.6.1-py3-none-any.whl", hash = 
"sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e", size = 3864451, upload-time = "2024-08-30T12:24:05.054Z" }, -] - -[[package]] -name = "mkdocs-autorefs" -version = "1.4.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown" }, - { name = "markupsafe" }, - { name = "mkdocs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/51/fa/9124cd63d822e2bcbea1450ae68cdc3faf3655c69b455f3a7ed36ce6c628/mkdocs_autorefs-1.4.3.tar.gz", hash = "sha256:beee715b254455c4aa93b6ef3c67579c399ca092259cc41b7d9342573ff1fc75", size = 55425, upload-time = "2025-08-26T14:23:17.223Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/4d/7123b6fa2278000688ebd338e2a06d16870aaf9eceae6ba047ea05f92df1/mkdocs_autorefs-1.4.3-py3-none-any.whl", hash = "sha256:469d85eb3114801d08e9cc55d102b3ba65917a869b893403b8987b601cf55dc9", size = 25034, upload-time = "2025-08-26T14:23:15.906Z" }, -] - -[[package]] -name = "mkdocs-get-deps" -version = "0.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mergedeep" }, - { name = "platformdirs" }, - { name = "pyyaml" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/98/f5/ed29cd50067784976f25ed0ed6fcd3c2ce9eb90650aa3b2796ddf7b6870b/mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c", size = 10239, upload-time = "2023-11-20T17:51:09.981Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/d4/029f984e8d3f3b6b726bd33cafc473b75e9e44c0f7e80a5b29abc466bdea/mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134", size = 9521, upload-time = "2023-11-20T17:51:08.587Z" }, -] - -[[package]] -name = "mkdocs-material" -version = "9.7.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "babel" }, - { name = "backrefs" }, - { name = "colorama" }, - { name = "jinja2" }, - { 
name = "markdown" }, - { name = "mkdocs" }, - { name = "mkdocs-material-extensions" }, - { name = "paginate" }, - { name = "pygments" }, - { name = "pymdown-extensions" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/27/e2/2ffc356cd72f1473d07c7719d82a8f2cbd261666828614ecb95b12169f41/mkdocs_material-9.7.1.tar.gz", hash = "sha256:89601b8f2c3e6c6ee0a918cc3566cb201d40bf37c3cd3c2067e26fadb8cce2b8", size = 4094392, upload-time = "2025-12-18T09:49:00.308Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3e/32/ed071cb721aca8c227718cffcf7bd539620e9799bbf2619e90c757bfd030/mkdocs_material-9.7.1-py3-none-any.whl", hash = "sha256:3f6100937d7d731f87f1e3e3b021c97f7239666b9ba1151ab476cabb96c60d5c", size = 9297166, upload-time = "2025-12-18T09:48:56.664Z" }, -] - -[[package]] -name = "mkdocs-material-extensions" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/79/9b/9b4c96d6593b2a541e1cb8b34899a6d021d208bb357042823d4d2cabdbe7/mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443", size = 11847, upload-time = "2023-11-22T19:09:45.208Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5b/54/662a4743aa81d9582ee9339d4ffa3c8fd40a4965e033d77b9da9774d3960/mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31", size = 8728, upload-time = "2023-11-22T19:09:43.465Z" }, -] - -[[package]] -name = "mkdocs-minify-plugin" -version = "0.8.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "csscompressor" }, - { name = "htmlmin2" }, - { name = "jsmin" }, - { name = "mkdocs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/52/67/fe4b77e7a8ae7628392e28b14122588beaf6078b53eb91c7ed000fd158ac/mkdocs-minify-plugin-0.8.0.tar.gz", hash = 
"sha256:bc11b78b8120d79e817308e2b11539d790d21445eb63df831e393f76e52e753d", size = 8366, upload-time = "2024-01-29T16:11:32.982Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/cd/2e8d0d92421916e2ea4ff97f10a544a9bd5588eb747556701c983581df13/mkdocs_minify_plugin-0.8.0-py3-none-any.whl", hash = "sha256:5fba1a3f7bd9a2142c9954a6559a57e946587b21f133165ece30ea145c66aee6", size = 6723, upload-time = "2024-01-29T16:11:31.851Z" }, -] - -[[package]] -name = "mkdocstrings" -version = "1.0.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "jinja2" }, - { name = "markdown" }, - { name = "markupsafe" }, - { name = "mkdocs" }, - { name = "mkdocs-autorefs" }, - { name = "pymdown-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/63/4d/1ca8a9432579184599714aaeb36591414cc3d3bfd9d494f6db540c995ae4/mkdocstrings-1.0.2.tar.gz", hash = "sha256:48edd0ccbcb9e30a3121684e165261a9d6af4d63385fc4f39a54a49ac3b32ea8", size = 101048, upload-time = "2026-01-24T15:57:25.735Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/57/32/407a9a5fdd7d8ecb4af8d830b9bcdf47ea68f916869b3f44bac31f081250/mkdocstrings-1.0.2-py3-none-any.whl", hash = "sha256:41897815a8026c3634fe5d51472c3a569f92ded0ad8c7a640550873eea3b6817", size = 35443, upload-time = "2026-01-24T15:57:23.933Z" }, -] - -[package.optional-dependencies] -python = [ - { name = "mkdocstrings-python" }, -] - -[[package]] -name = "mkdocstrings-python" -version = "2.0.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "griffe" }, - { name = "mkdocs-autorefs" }, - { name = "mkdocstrings" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/24/75/d30af27a2906f00eb90143470272376d728521997800f5dce5b340ba35bc/mkdocstrings_python-2.0.1.tar.gz", hash = "sha256:843a562221e6a471fefdd4b45cc6c22d2607ccbad632879234fa9692e9cf7732", size = 199345, upload-time = "2025-12-03T14:26:11.755Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/81/06/c5f8deba7d2cbdfa7967a716ae801aa9ca5f734b8f54fd473ef77a088dbe/mkdocstrings_python-2.0.1-py3-none-any.whl", hash = "sha256:66ecff45c5f8b71bf174e11d49afc845c2dfc7fc0ab17a86b6b337e0f24d8d90", size = 105055, upload-time = "2025-12-03T14:26:10.184Z" }, -] - [[package]] name = "multidict" version = "6.7.1" @@ -1978,15 +1663,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] -[[package]] -name = "paginate" -version = "0.5.7" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ec/46/68dde5b6bc00c1296ec6466ab27dddede6aec9af1b99090e1107091b3b84/paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945", size = 19252, upload-time = "2024-08-25T14:17:24.139Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/90/96/04b8e52da071d28f5e21a805b19cb9390aa17a47462ac87f5e2696b9566d/paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591", size = 13746, upload-time = "2024-08-25T14:17:22.55Z" }, -] - [[package]] name = "pandas" version = "3.0.0" @@ -2487,19 +2163,6 @@ crypto = [ { name = "cryptography" }, ] -[[package]] -name = "pymdown-extensions" -version = "10.20.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown" }, - { name = "pyyaml" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/1e/6c/9e370934bfa30e889d12e61d0dae009991294f40055c238980066a7fbd83/pymdown_extensions-10.20.1.tar.gz", hash = "sha256:e7e39c865727338d434b55f1dd8da51febcffcaebd6e1a0b9c836243f660740a", size = 852860, upload-time = "2026-01-24T05:56:56.758Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/40/6d/b6ee155462a0156b94312bdd82d2b92ea56e909740045a87ccb98bf52405/pymdown_extensions-10.20.1-py3-none-any.whl", hash = "sha256:24af7feacbca56504b313b7b418c4f5e1317bb5fea60f03d57be7fcc40912aa0", size = 268768, upload-time = "2026-01-24T05:56:54.537Z" }, -] - [[package]] name = "pynacl" version = "1.6.2" @@ -2677,18 +2340,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, ] -[[package]] -name = "pyyaml-env-tag" -version = "1.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyyaml" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/eb/2e/79c822141bfd05a853236b504869ebc6b70159afc570e1d5a20641782eaa/pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff", size = 5737, upload-time = "2025-05-13T15:24:01.64Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/11/432f32f8097b03e3cd5fe57e88efb685d964e2e5178a48ed61e841f7fdce/pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", size = 4722, upload-time = "2025-05-13T15:23:59.629Z" }, -] - [[package]] name = "referencing" version = "0.37.0" @@ -3146,33 +2797,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3a/9a/f3919d7ee7ba99dabf0aac7e299c6c328f5eae94f9f6b28c76005f882d5d/wandb-0.24.2-py3-none-win_arm64.whl", hash = "sha256:b42614b99f8b9af69f88c15a84283a973c8cd5750e9c4752aa3ce21f13dbac9a", size = 20268261, upload-time = "2026-02-05T00:12:14.353Z" }, ] -[[package]] -name = "watchdog" -version = "6.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" }, - { url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" }, - { url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" }, - { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, - { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, - { url = 
"https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, - { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, - { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, - { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, - { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, - { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, - { url = 
"https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, - { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, - { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, - { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, - { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, - { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = 
"sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, - { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, -] - [[package]] name = "xxhash" version = "3.6.0"