diff --git a/CHANGELOG.md b/CHANGELOG.md index 738da96..6db6d97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,22 +10,75 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added -- **Smart Runner:** Node9 now intercepts the _initial_ command you pass to it (e.g., `node9 "rm -rf /"`) and checks it against your security policy before execution. +- **Shadow Git Snapshots (Phase 2):** (Coming Soon) Automatic lightweight git commits before AI edits, allowing `node9 undo`. + +--- + +## [0.3.0] - 2026-03-06 + +### Added + +- **Multi-Channel Race Engine:** Node9 now fires all enabled approval channels simultaneously (Native Popup, Browser UI, Cloud/Slack, and Terminal). The first channel to receive a human signature wins and instantly cancels all other pending racers. +- **AI Negotiation Loop:** Replaced generic "Access Denied" errors with instructional feedback prompts. When blocked, Node9 injects a structured message into the LLM's context window, teaching the agent to apologize, explain its reasoning, or pivot to a safer alternative. +- **Native OS Dialogs:** Integrated sub-second, keyboard-navigable approval dialogs via `osascript` (macOS), `zenity` (Linux), and `PowerShell` (Windows). +- **Resolution Waterfall:** Implemented a strict 5-tier configuration precedence engine: `Environment Variables` > `Cloud (SaaS)` > `Project Config` > `Global Config` > `Defaults`. +- **Identity-Aware Execution:** The policy engine now distinguishes between a Human (`Terminal`) and an AI Agent (`Claude/Gemini`). Manual shell commands now benefit from "Nuclear-only" protection, while AI agents remain under "Zero Trust" restrictions. +- **Extended Hook Timeouts:** Default hook timeouts for Claude and Gemini have been increased to 10 minutes to support asynchronous Slack and remote approvals. +- **Sandbox Paths:** Added `policy.sandboxPaths` support. 
Any command operating strictly within defined safe zones (e.g., `/tmp/**`) is automatically allowed without human intervention. +- **Atomic File Writes:** Implemented `atomicWriteSync` for all state files (`decisions.json`, `trust.json`, `PAUSED`). This prevents JSON corruption during concurrent AI tool calls. + +### Fixed + +- **True Proxy Interception:** Rewrote the Proxy/MCP runner to intercept the Agent's `stdin` (requests) rather than just monitoring the Server's `stdout` (responses). Dangerous actions are now caught _before_ they reach the target server. +- **Port Conflict Resurrection:** The daemon now detects zombie PID files and `EADDRINUSE` errors, automatically clearing dead processes and resurrecting the server. +- **Credential Separation:** API keys are now strictly isolated in `~/.node9/credentials.json` and are never read from project-level config files to prevent accidental leakage to version control. + +### Security + +- **Waterfall Governance:** Cloud-level "Organization Policies" now act as a Master Lock, disabling local "Allow" buttons in the Native and Browser UIs when a remote manager signature is required. +- **Graceful Idle Timeout:** The background daemon now implements a 12-hour inactivity timer to automatically shut down and free system resources after use. + +--- + +## [0.2.0] - 2026-03-01 + +### Added + +- **Local Approval Daemon:** `node9 daemon` starts a browser-based approval UI at `http://127.0.0.1:7391`. Approve, deny, or set persistent per-tool decisions ("Always Allow" / "Always Deny") without a Slack account. +- **Persistent Decisions Panel:** The daemon browser UI shows a `πŸ“‹ Persistent Decisions` panel listing every "Always Allow" / "Always Deny" decision with a **Revoke** button. Revoking a decision removes it from `~/.node9/decisions.json` and updates all open tabs instantly via SSE. +- **`GET /decisions` endpoint:** Returns the current persistent decisions map (read-only, no CSRF required). 
+- **`DELETE /decisions/:toolName` endpoint:** Removes a persistent decision (requires `X-Node9-Token` CSRF header). +- **Auto-start Daemon:** When a dangerous call arrives and no approval mechanism is running, Node9 automatically starts the daemon and opens the browser β€” no manual setup required. +- **Browser-Close Fallback:** If the browser tab is closed mid-approval, Node9 waits 2 seconds (to allow for an accidental refresh), then abandons the request and falls back to a terminal Y/N prompt, then blocks if there is no TTY. The daemon shuts down cleanly after abandonment. +- **`autoStartDaemon` Setting:** Toggle daemon auto-start from the browser UI (βš™οΈ Settings) or via `settings.autoStartDaemon: false` in `~/.node9/config.json`. When disabled, dangerous commands fall back directly to a terminal prompt. +- **Smart Runner β€” Browser-First Flow:** `node9 "rm -rf ./data"` now prefers the browser popup over a plain terminal prompt. Terminal Y/N is only shown if the daemon fails to start. This makes the full approval UI available without pre-running `node9 daemon`. +- **Terminal Prompt Timeout:** The interactive Y/N approval prompt now auto-denies after 30 seconds if no response is given, preventing agents from hanging indefinitely when a human walks away. +- **Smart Runner:** Node9 intercepts the initial command you pass to it (e.g., `node9 "rm -rf /"`) and checks it against your security policy before execution. - **Improved Gemini CLI Integration:** Fully supports the latest Gemini CLI hook schema (array-based `BeforeTool`/`AfterTool`). -- **Verbose Policy Debugging:** Added `~/.node9/policy-debug.log` and `~/.node9/hook-debug.log` to help troubleshoot complex policy decisions and hook payloads. +- **Verbose Policy Debugging:** Added `~/.node9/hook-debug.log` to help troubleshoot hook payloads (`NODE9_DEBUG=1`). 
+- **`getGlobalSettings()` API:** Reads machine-level settings from `~/.node9/config.json` independently of project config, so daemon lifecycle preferences are never overridden by a project's policy file. ### Fixed - **Case-Insensitive Tool Matching:** Tool names like `Shell`, `shell`, and `run_shell_command` are now correctly identified and intercepted regardless of casing. - **Robust Hook Setup:** `node9 addto gemini` now automatically detects and fixes outdated object-based hook configurations in `settings.json`. -- **Terminal Prompt in Hooks:** `node9 check` now correctly fallbacks to an interactive terminal prompt (y/N) even when running as a background hook, if no Node9 Cloud API key is configured. -- **Duplicate Interception:** Fixed a bug where `run_shell_command` was in the default `ignoredTools` list, preventing it from being properly inspected. +- **Duplicate Interception:** Fixed a bug where `run_shell_command` was in the default `ignoredTools` list, preventing it from being properly inspected. Also corrected the example `node9.config.json`. +- **`node9 check` stdin Hang:** The hook no longer hangs if the AI agent opens stdin but never sends EOF. A 5-second safety timeout ensures the process exits cleanly. +- **Smart Runner Terminal Prompt:** `allowTerminalFallback` was not being passed in the smart runner path, so the Y/N prompt never appeared. Now correctly shown when no daemon is running and a TTY is available. + +### Security + +- **CSRF Token on Daemon:** A per-run UUID token is embedded in the browser UI and required as `X-Node9-Token` on all state-changing requests (`POST /decision`, `POST /settings`). Prevents other local processes from silently approving pending actions. +- **Credentials File Permissions:** `~/.node9/credentials.json` is now written with `0o600` (owner read/write only). 
+- **Daemon Error Logging:** Handler errors in `/check`, `/decision`, and `/settings` are now logged to stderr when `NODE9_DEBUG=1`, making production debugging possible without exposing errors by default. ### Changed - `node9 addto` now supports the new array-based hook structure for Gemini CLI. - Updated internal `GeminiSettings` interfaces to match the latest CLI specifications. +--- + ## [0.1.0] - 2026-02-01 ### Added diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 304b98b..b8b0e87 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ Thank you for helping make AI agents safer. All contributions are welcome β€” bu ## Getting Started ```bash -git clone https://github.com/nadav-node9/node9-proxy.git +git clone https://github.com/node9-ai/node9-proxy.git cd node9-proxy npm install npm run build @@ -21,7 +21,7 @@ npm run demo ### Reporting Bugs -Open an issue at https://github.com/nadav-node9/node9-proxy/issues with: +Open an issue at https://github.com/node9-ai/node9-proxy/issues with: - Node.js version (`node --version`) - What you ran and what you expected diff --git a/README.md b/README.md index ab6e259..c348321 100644 --- a/README.md +++ b/README.md @@ -5,280 +5,132 @@ [![NPM Version](https://img.shields.io/npm/v/@node9/proxy.svg)](https://www.npmjs.com/package/@node9/proxy) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) -**Node9** is the execution security layer for the Agentic Era. It acts as a deterministic "Sudo" proxy between AI Agents (Claude Code, Gemini CLI, Cursor, MCP Servers) and your production environment. - -While others try to _guess_ if a prompt is malicious (Semantic Security), Node9 _intercepts_ the actual action (Execution Security). 
- -## πŸ—ΊοΈ Architecture - -```mermaid -sequenceDiagram - participant LLM as AI Model (Gemini/Claude) - participant Agent as Agent CLI (Gemini/Claude Code) - participant Node9 as Node9 Proxy - participant OS as Local System/Shell - - LLM->>Agent: "Delete the tmp folder" - Agent->>Node9: Tool Call: Shell { command: "rm -rf ./tmp" } - - Note over Node9: 🧠 Semantic Parser analyzes AST - Note over Node9: πŸ›‘οΈ Policy Engine checks rules - - alt is dangerous & not allowed - Node9-->>Agent: ❌ BLOCK: Decision denied - Agent-->>LLM: "Action blocked by security policy" - else is safe OR approved by user - Node9->>OS: Execute: rm -rf ./tmp - OS-->>Node9: Success - Node9-->>Agent: Tool Result: Success - Agent-->>LLM: "Folder deleted" - end -``` - ---- - -## πŸ›‘ The Problem: Agent Liability - -In 2026, AI agents have "Write Access" to everything (GitHub, AWS, Stripe, Databases). - -- **The Risk:** An agent hallucinating a `DROP DATABASE` or an unauthorized `aws.delete_instance`. -- **The Solution:** Node9 intercepts high-risk tool calls and pauses execution until a human provides a signature. - -## πŸš€ Key Features - -- **Deterministic "Sudo" Mode:** Intercepts dangerous tool calls based on hardcoded policies. -- **Human-in-the-Loop (HITL):** Requires explicit approval via the **Terminal** (Local) or **Slack** (Cloud). -- **One-Command Setup:** `node9 addto claude` wires up full protection in seconds β€” no manual config editing. -- **MCP Native:** Deep-packet inspection of JSON-RPC traffic. Protects any Model Context Protocol server. -- **Hook Native:** Plugs into Claude Code, Gemini CLI, and Cursor's native hook systems to intercept built-in tools (Bash, Write, Edit) β€” not just MCP calls. -- **Global Config:** Store your security posture in a `node9.config.json` file in your project root. - ---- - -## πŸ“¦ Installation +**Node9** is the execution security layer for the Agentic Era. 
It encases autonomous AI Agents (Claude Code, Gemini CLI, Cursor, MCP Servers) in a deterministic security wrapper, intercepting dangerous shell commands and tool calls before they execute. -```bash -npm install -g @node9/proxy -``` +While others try to _guess_ if a prompt is malicious (Semantic Security), Node9 _governs_ the actual action (Execution Security). --- -## ⚑ Quick Start - -The fastest way to get full protection is one command: - -```bash -# Protect Claude Code (MCP servers + Bash/Write/Edit hooks) -node9 addto claude +## ⚑ Key Architectural Upgrades -# Protect Gemini CLI (BeforeTool / AfterTool hooks) -node9 addto gemini +### 🏁 The Multi-Channel Race Engine -# Protect Cursor -node9 addto cursor -``` +Node9 initiates a **Concurrent Race** across all enabled channels. The first channel to receive a human signature wins and instantly cancels the others: -### 🎯 The Smart Runner +- **Native Popup:** OS-level dialog (Mac/Win/Linux) for sub-second keyboard dismissal. +- **Browser Dashboard:** Local web UI for deep inspection of large payloads (SQL/Code). +- **Cloud (Slack):** Remote asynchronous approval for team governance. +- **Terminal:** Classic `[Y/n]` prompt for manual proxy usage and SSH sessions. -You can now protect any command by simply prefixing it with `node9`: +### 🧠 AI Negotiation Loop -```bash -# Intercepts 'rm -rf /' before starting -node9 "rm -rf /" +Node9 doesn't just "cut the wire." When a command is blocked, it injects a **Structured Negotiation Prompt** back into the AI’s context window. This teaches the AI why it was stopped and instructs it to pivot to a safer alternative or apologize to the human. 
-# Runs Gemini with full proxy & hook protection -node9 gemini -``` +### 🌊 The Resolution Waterfall -_Note: Always wrap the target command in quotes to avoid argument conflicts._ +Security posture is resolved using a strict 5-tier waterfall: -Node9 will show you exactly what it's about to change and ask for confirmation before touching any config file. +1. **Env Vars:** Session-level overrides (e.g., `NODE9_PAUSED=1`). +2. **Cloud (SaaS):** Global organization "Locks" that cannot be bypassed locally. +3. **Project Config:** Repository-specific rules (`node9.config.json`). +4. **Global Config:** Personal UI preferences (`~/.node9/config.json`). +5. **Defaults:** The built-in safety net. --- -## πŸ›  Usage - -### 1. Connect to Node9 Cloud (Optional) - -To route approvals to **Slack** when you are away from your terminal, login once with your API key: +## πŸš€ Quick Start ```bash -node9 login -``` - -_Your credentials are stored securely in `~/.node9/credentials.json`._ - -### 2. One-Command Agent Setup - -`node9 addto ` wires up Node9 to your AI agent automatically: - -| Target | MCP Servers | Built-in Tools (Bash, Write, Edit...) | Audit Log | -| -------- | :---------: | :-----------------------------------: | :-------: | -| `claude` | βœ… | βœ… via `PreToolUse` hook | βœ… | -| `gemini` | βœ… | βœ… via `BeforeTool` hook | βœ… | -| `cursor` | βœ… | βœ… via `preToolUse` hook | βœ… | - -**What it does under the hood:** - -- Wraps your existing MCP servers with `node9 proxy` (asks for confirmation first) -- Adds a pre-execution hook β†’ `node9 check` runs before every tool call -- Adds a post-execution hook β†’ `node9 log` writes every executed action to `~/.node9/audit.log` - -### 3. Manual Command & MCP Protection - -To protect any command or MCP server manually: - -**Protecting the Gemini CLI:** - -```bash -node9 gemini -``` - -**Protecting a direct command:** +npm install -g @node9/proxy -```bash -node9 "rm -rf ./data" -``` +# 1. 
Setup protection for your favorite agent +node9 addto claude +node9 addto gemini -**Protecting GitHub MCP Server:** +# 2. (Optional) Connect to Slack for remote approvals +node9 login -```bash -node9 "npx @modelcontextprotocol/server-github" +# 3. Check your status +node9 status ``` -### 4. SDK β€” Protect Functions in Your Own Code - -Wrap any async function with `protect()` to require human approval before it runs: - -```typescript -import { protect } from '@node9/proxy'; +--- -const deleteDatabase = protect('aws.rds.delete_database', async (name: string) => { - // ... actual deletion logic -}); +## πŸ›  Protection Modes -// Node9 intercepts this and prompts for approval before running -await deleteDatabase('production-db-v1'); -``` +| Mode | Target | How it works | +| :-------------- | :--------------------- | :------------------------------------------------------ | +| **Hook Mode** | Claude, Gemini, Cursor | `node9 addto ` wires native pre-execution hooks. | +| **Proxy Mode** | MCP Servers, Shell | `node9 "npx "` intercepts JSON-RPC traffic. | +| **Manual Mode** | You | `node9 rm -rf /` protects you from your own typos. | --- ## βš™οΈ Configuration (`node9.config.json`) -Add a `node9.config.json` to your project root or `~/.node9/config.json` for global use. +Rules are **merged additive**β€”you cannot "un-danger" a word locally if it was defined as dangerous by a higher authority (like the Cloud). 
```json { "settings": { - "mode": "standard" + "mode": "standard", + "enableUndo": true, + "approvers": { + "native": true, + "browser": true, + "cloud": true, + "terminal": true + } }, "policy": { - "dangerousWords": ["delete", "drop", "terminate", "rm", "rmdir"], + "sandboxPaths": ["/tmp/**", "**/test-results/**"], + "dangerousWords": ["drop", "destroy", "purge", "push --force"], "ignoredTools": ["list_*", "get_*", "read_*"], "toolInspection": { "bash": "command", - "shell": "command", - "run_shell_command": "command" - }, - "rules": [ - { - "action": "rm", - "allowPaths": ["**/node_modules/**", "dist/**", "build/**"] - } - ] - }, - "environments": { - "production": { - "requireApproval": true, - "slackChannel": "#alerts-prod-security" - }, - "development": { - "requireApproval": false + "postgres:query": "sql" } } } ``` -**Modes:** - -- `standard`: Allows everything except tools containing `dangerousWords`. -- `strict`: Blocks **everything** except tools listed in `ignoredTools`. - -**Environment overrides** (keyed by `NODE_ENV`): - -- `requireApproval: false` β€” auto-allow all actions in that environment (useful for local dev). -- `slackChannel` β€” route cloud approvals to a specific Slack channel for that environment. - -### πŸ”Œ Universal Tool Inspection (The "Universal Adapter") +--- -Node9 can protect **any** tool, even if it's not Claude or Gemini. You can tell Node9 where to find the "dangerous" payload in any tool call. +## βͺ Phase 2: The "Undo" Engine (Coming Soon) -Example: Protecting a custom "Stripe" MCP server: - -```json -"toolInspection": { - "stripe.send_refund": "amount", - "github.delete*": "params.repo_name" -} -``` - -Now, whenever your agent calls `stripe.send_refund`, Node9 will extract the `amount` and check it against your global security policy. +Node9 is currently building **Shadow Git Snapshots**. When enabled, Node9 takes a silent, lightweight Git snapshot right before an AI agent is allowed to edit or delete files. 
If the AI hallucinates, you can revert the entire session with one click: `node9 undo`. --- -## πŸ›‘οΈ How it Works +## πŸ”§ Troubleshooting -Node9 is **deterministic**. It doesn't use AI to check AI. +**`node9 check` exits immediately / Claude is never blocked** +Node9 fails open by design to prevent breaking your agent. Check debug logs: `NODE9_DEBUG=1 claude`. -### Hook Mode (via `node9 addto`) +**Terminal prompt never appears during Claude/Gemini sessions** +Interactive agents run hooks in a "Headless" subprocess. You **must** enable `native: true` or `browser: true` in your config to see approval prompts. -``` -Claude wants to run Bash("rm -rf /data") - β”‚ - PreToolUse hook fires - β†’ node9 check - β”‚ - β”Œβ”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β” - β”‚ BLOCKED β”‚ β†’ Claude is told the action was denied - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - OR - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ APPROVED β”‚ β†’ Claude runs the command - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - PostToolUse hook fires - β†’ node9 log β†’ appended to ~/.node9/audit.log -``` - -### Proxy Mode (via `node9 proxy`) - -``` -1. Intercept β€” catches the JSON-RPC tools/call request mid-flight -2. Evaluate β€” checks against your local node9.config.json -3. Suspend β€” execution is frozen in a PENDING state -4. Authorize β€” Local: prompt in terminal / Cloud: button in Slack -5. Release β€” command forwarded to the target only after approval -``` +**"Blocked by Organization (SaaS)"** +A corporate policy has locked this action. You must click the "Approve" button in your company's Slack channel to proceed. 
--- -## πŸ“ˆ Roadmap +## πŸ—ΊοΈ Roadmap -- [x] Local Terminal "Sudo" (OSS) -- [x] MCP JSON-RPC Interceptor -- [x] Slack Remote Approvals (Pro) -- [x] One-command setup (`node9 addto claude/gemini/cursor`) -- [x] Hook-native integration (PreToolUse / BeforeTool / preToolUse) -- [x] Audit log (`~/.node9/audit.log`) -- [ ] **Multi-Admin Quorum** (Approve only if 2 admins click) -- [ ] **SOC2 Tamper-proof Audit Logs** (Enterprise) +- [x] **Multi-Channel Race Engine** (Simultaneous Native/Browser/Cloud/Terminal) +- [x] **AI Negotiation Loop** (Instructional feedback loop to guide LLM behavior) +- [x] **Resolution Waterfall** (Cascading configuration: Env > Cloud > Project > Global) +- [x] **Native OS Dialogs** (Sub-second approval via Mac/Win/Linux system windows) +- [x] **One-command Agent Setup** (`node9 addto claude | gemini | cursor`) +- [x] **Identity-Aware Execution** (Differentiates between Human vs. AI risk levels) +- [ ] **Shadow Git Snapshots** (1-click Undo for AI hallucinations) +- [ ] **Execution Sandboxing** (Simulate dangerous commands in a virtual FS before applying) +- [ ] **Multi-Admin Quorum** (Require 2+ human signatures for high-stakes production actions) +- [ ] **SOC2 Tamper-proof Audit Trail** (Cryptographically signed, cloud-managed logs) --- -## 🏒 Enterprise & Commercial Use - -The local proxy is free forever for individual developers. For teams requiring **Slack Integration**, **VPC Deployment**, and **Tamper-proof Audit Logs**, visit [node9.ai](https://node9.ai) or contact `support@node9.ai`. - ---- +## 🏒 Enterprise & Compliance -**Safe Agentic Automations Start with Node9.** πŸ›‘οΈπŸš€ +Node9 Pro provides **Governance Locking**, **SAML/SSO**, and **VPC Deployment**. 
+Visit [node9.ai](https://node9.ai). diff --git a/SECURITY.md b/SECURITY.md index 98c48a2..c15c62e 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,13 +4,13 @@ | Version | Supported | | ------- | --------- | -| 0.1.x | Yes | +| latest | βœ… Yes | ## Reporting a Vulnerability **Please do not report security vulnerabilities through public GitHub issues.** -Email: **suppot@node9.ai** +Email: **security@node9.ai** Include: @@ -22,6 +22,20 @@ You will receive an acknowledgment within 48 hours and a resolution timeline wit We follow responsible disclosure β€” we ask that you give us time to patch before publishing details publicly. +## Threat Model + +node9-proxy is a **localhost-only** daemon that intercepts AI agent tool calls before they execute. The daemon binds exclusively to `127.0.0.1` and is never exposed to the network. + +| Component | Trust boundary | +| ------------------------------------------ | ------------------------------------------------------------------------------ | +| Daemon HTTP server | localhost only (`127.0.0.1:7391`) | +| Browser UI | Protected by a per-session CSRF token (`x-node9-token` header) | +| Internal resolve endpoint (`/resolve/:id`) | Protected by a separate per-session internal token (`x-node9-internal` header) | +| Audit log | Written to `~/.node9/audit.log`; secrets are redacted before storage | +| Decisions file | Written to `~/.node9/decisions.json`; stores only `allow`/`deny` per tool name | + +**Known limitation**: If an attacker already has local code execution on the machine running the daemon, they can bypass controls. This is by design β€” the daemon is a human-in-the-loop safety layer, not a sandbox. 
+ ## Scope Reports are in scope for: diff --git a/node9.config.json_ b/node9.config.json_ new file mode 100644 index 0000000..b04906f --- /dev/null +++ b/node9.config.json_ @@ -0,0 +1,90 @@ +{ + "version": "1.0", + "settings": { + "mode": "standard" + }, + "policy": { + "dangerousWords": [ + "drop", + "destroy", + "purge", + "rmdir", + "push", + "force" + ], + "ignoredTools": [ + "list_*", + "get_*", + "read_*", + "describe_*", + "read", + "write", + "edit", + "multiedit", + "glob", + "grep", + "ls", + "notebookread", + "notebookedit", + "todoread", + "todowrite", + "webfetch", + "websearch", + "exitplanmode", + "askuserquestion", + "agent", + "task*" + ], + "toolInspection": { + "bash": "command", + "shell": "command", + "run_shell_command": "command", + "terminal.execute": "command" + }, + "rules": [ + { + "action": "rm", + "allowPaths": [ + "**/node_modules/**", + "**/node_modules", + "dist/**", + "dist", + "build/**", + "build", + ".next/**", + ".next", + ".nuxt/**", + ".nuxt", + "coverage/**", + "coverage", + ".cache/**", + ".cache", + "tmp/**", + "tmp", + "temp/**", + "temp", + "**/__pycache__/**", + "**/__pycache__", + "**/.pytest_cache/**", + "**/.pytest_cache", + "**/*.log", + "**/*.tmp", + ".DS_Store", + "**/yarn.lock", + "**/package-lock.json", + "**/pnpm-lock.yaml" + ] + } + ] + }, + "environments": { + "production": { + "requireApproval": true, + "slackChannel": "#general" + }, + "development": { + "requireApproval": true, + "slackChannel": "#general" + } + } +} diff --git a/node9.config.json__ b/node9.config.json__ new file mode 100644 index 0000000..b0f6339 --- /dev/null +++ b/node9.config.json__ @@ -0,0 +1,93 @@ +{ + "version": "1.0", + "settings": { + "mode": "standard", + "approvers": { + "native": false + } + }, + "policy": { + "dangerousWords": [ + "drop", + "destroy", + "purge", + "rmdir", + "push", + "force" + ], + "ignoredTools": [ + "list_*", + "get_*", + "read_*", + "describe_*", + "read", + "write", + "edit", + "multiedit", + "glob", + 
"grep", + "ls", + "notebookread", + "notebookedit", + "todoread", + "todowrite", + "webfetch", + "websearch", + "exitplanmode", + "askuserquestion", + "agent", + "task*" + ], + "toolInspection": { + "bash": "command", + "shell": "command", + "run_shell_command": "command", + "terminal.execute": "command" + }, + "rules": [ + { + "action": "rm", + "allowPaths": [ + "**/node_modules/**", + "**/node_modules", + "dist/**", + "dist", + "build/**", + "build", + ".next/**", + ".next", + ".nuxt/**", + ".nuxt", + "coverage/**", + "coverage", + ".cache/**", + ".cache", + "tmp/**", + "tmp", + "temp/**", + "temp", + "**/__pycache__/**", + "**/__pycache__", + "**/.pytest_cache/**", + "**/.pytest_cache", + "**/*.log", + "**/*.tmp", + ".DS_Store", + "**/yarn.lock", + "**/package-lock.json", + "**/pnpm-lock.yaml" + ] + } + ] + }, + "environments": { + "production": { + "requireApproval": true, + "slackChannel": "#general" + }, + "development": { + "requireApproval": true, + "slackChannel": "#general" + } + } +} diff --git a/package-lock.json b/package-lock.json index c52556c..f28bf13 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@node9/proxy", - "version": "0.1.0", + "version": "0.2.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@node9/proxy", - "version": "0.1.0", + "version": "0.2.1", "license": "MIT", "dependencies": { "@inquirer/prompts": "^8.3.0", diff --git a/package.json b/package.json index 500c990..7e6a6ab 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@node9/proxy", - "version": "0.2.0", + "version": "0.2.1", "description": "The Sudo Command for AI Agents. Execution Security for Claude Code & MCP.", "main": "./dist/index.js", "module": "./dist/index.mjs", @@ -51,8 +51,6 @@ "build": "tsup", "dev": "tsup --watch", "demo": "tsx examples/demo.ts", - "test": "vitest run", - "test:watch": "vitest", "typecheck": "tsc --noEmit", "lint": "eslint .", "lint:fix": "eslint . 
--fix", @@ -60,8 +58,11 @@ "format:check": "prettier --check .", "fix": "npm run format && npm run lint:fix", "validate": "npm run format && npm run lint && npm run typecheck && npm run test && npm run test:e2e && npm run build", - "test:e2e": "bash scripts/e2e.sh", - "prepublishOnly": "npm run validate" + "test:e2e": "NODE9_TESTING=1 bash scripts/e2e.sh", + "prepublishOnly": "npm run validate", + "test": "NODE_ENV=test vitest --run", + "test:watch": "NODE_ENV=test vitest", + "test:ui": "NODE_ENV=test vitest --ui" }, "dependencies": { "@inquirer/prompts": "^8.3.0", diff --git a/scripts/e2e.sh b/scripts/e2e.sh index 7b731d7..22cb368 100755 --- a/scripts/e2e.sh +++ b/scripts/e2e.sh @@ -40,7 +40,9 @@ fi # Run all hook checks from a temp dir with a known config so tests are # independent of whatever node9.config.json exists in the repo root. TESTDIR=$(mktemp -d) -trap 'rm -rf "$TESTDIR"' EXIT +TEST_HOME=$(mktemp -d) +mkdir -p "$TEST_HOME/.node9" +trap 'rm -rf "$TESTDIR" "$TEST_HOME"' EXIT cat > "$TESTDIR/node9.config.json" << 'EOF' { @@ -57,6 +59,12 @@ cat > "$TESTDIR/node9.config.json" << 'EOF' EOF cd "$TESTDIR" +# Disable all daemon interaction so tests never open a browser or hang waiting for clicks +export NODE9_NO_AUTO_DAEMON=1 +# Stop any running daemon using the real HOME before we isolate the environment +node "$REPO_ROOT/dist/cli.js" daemon stop 2>/dev/null || true +# Use an isolated HOME so credentials.json and decisions.json don't affect results +export HOME="$TEST_HOME" # ============================================================================= # PART 1 β€” node9 check (simulates Claude Code's PreToolUse hook) @@ -131,6 +139,14 @@ out=$(echo '' | $NODE9 check 2>/dev/null); ec=$? out=$(echo 'not json at all' | $NODE9 check 2>/dev/null); ec=$? [ $ec -eq 0 ] && pass "Invalid JSON β†’ exits 0 (fail-open)" || fail "Invalid JSON crashed (exit $ec)" +echo -e "\n ${YELLOW}Daemon isolation (NODE9_NO_AUTO_DAEMON=1 must prevent auto-start):${RESET}" +if [ ! 
-f "$HOME/.node9/daemon.pid" ]; then + pass "NODE9_NO_AUTO_DAEMON=1 β€” no daemon was started during check tests" +else + fail "Daemon was auto-started during tests β€” NODE9_NO_AUTO_DAEMON=1 had no effect" + node "$REPO_ROOT/dist/cli.js" daemon stop 2>/dev/null || true +fi + # ============================================================================= # PART 2 β€” node9 log (simulates Claude Code's PostToolUse hook) # ============================================================================= diff --git a/src/__tests__/advanced_policy.test.ts b/src/__tests__/advanced_policy.test.ts index 6b62d4f..ec9814c 100644 --- a/src/__tests__/advanced_policy.test.ts +++ b/src/__tests__/advanced_policy.test.ts @@ -26,7 +26,9 @@ describe('Path-Based Policy (Advanced)', () => { vi.spyOn(fs, 'readFileSync').mockReturnValue(JSON.stringify(mockConfig)); // Should be allowed because it matches the glob - expect(await evaluatePolicy('Bash', { command: 'rm -rf ./node_modules/lodash' })).toBe('allow'); + expect( + (await evaluatePolicy('Bash', { command: 'rm -rf ./node_modules/lodash' })).decision + ).toBe('allow'); }); it('blocks "rm -rf src" when not in allow list', async () => { @@ -43,7 +45,7 @@ describe('Path-Based Policy (Advanced)', () => { vi.mocked(fs.existsSync).mockReturnValue(true); vi.spyOn(fs, 'readFileSync').mockReturnValue(JSON.stringify(mockConfig)); - expect(await evaluatePolicy('Bash', { command: 'rm -rf src' })).toBe('review'); + expect((await evaluatePolicy('Bash', { command: 'rm -rf src' })).decision).toBe('review'); }); it('blocks "rm -rf .env" using explicit blockPaths', async () => { @@ -61,7 +63,7 @@ describe('Path-Based Policy (Advanced)', () => { vi.mocked(fs.existsSync).mockReturnValue(true); vi.spyOn(fs, 'readFileSync').mockReturnValue(JSON.stringify(mockConfig)); - expect(await evaluatePolicy('Bash', { command: 'rm .env' })).toBe('review'); + expect((await evaluatePolicy('Bash', { command: 'rm .env' })).decision).toBe('review'); }); it('correctly 
tokenizes and identifies "rm" even with complex shell syntax', async () => { @@ -74,8 +76,10 @@ describe('Path-Based Policy (Advanced)', () => { vi.spyOn(fs, 'readFileSync').mockReturnValue(JSON.stringify(mockConfig)); // Pipe bypass attempt - expect(await evaluatePolicy('Bash', { command: 'echo "hello" | rm' })).toBe('review'); + expect((await evaluatePolicy('Bash', { command: 'echo "hello" | rm' })).decision).toBe( + 'review' + ); // Escaped bypass attempt - expect(await evaluatePolicy('Bash', { command: 'r\\m -rf /' })).toBe('review'); + expect((await evaluatePolicy('Bash', { command: 'r\\m -rf /' })).decision).toBe('review'); }); }); diff --git a/src/__tests__/cli_runner.test.ts b/src/__tests__/cli_runner.test.ts index 99243dd..348b7b7 100644 --- a/src/__tests__/cli_runner.test.ts +++ b/src/__tests__/cli_runner.test.ts @@ -1,22 +1,218 @@ -import { describe, it, expect, vi } from 'vitest'; - -// We mock child_process.spawn to verify that the Smart Runner actually tries to execute proxied commands -vi.mock('child_process', () => ({ - spawn: vi.fn(() => ({ - stdin: { write: vi.fn() }, - stdout: { on: vi.fn(), pipe: vi.fn() }, - on: vi.fn(), - })), +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { + authorizeHeadless, + evaluatePolicy, + getGlobalSettings, + isDaemonRunning, + _resetConfigCache, +} from '../core.js'; + +vi.mock('@inquirer/prompts', () => ({ confirm: vi.fn() })); + +vi.mock('../ui/native', () => ({ + askNativePopup: vi.fn().mockReturnValue('deny'), + sendDesktopNotification: vi.fn(), })); -// We'll test the logic by importing the cli file. -// Note: importing a file that calls .parse() immediately can be tricky in tests. -// For this repo, we'll verify the logic by checking if the 'proxy' behavior is integrated. 
+const existsSpy = vi.spyOn(fs, 'existsSync'); +const readSpy = vi.spyOn(fs, 'readFileSync'); +vi.spyOn(fs, 'writeFileSync').mockImplementation(() => undefined); +vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined); +const homeSpy = vi.spyOn(os, 'homedir'); + +/** Mock global config with native approver disabled so the race engine + * relies only on daemon / terminal / cloud channels (matching test intent). */ +function mockNoNativeConfig(extra?: Record) { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => + String(p) === globalPath + ? JSON.stringify({ settings: { approvers: { native: false }, ...extra } }) + : '' + ); +} + +beforeEach(() => { + _resetConfigCache(); + existsSpy.mockReturnValue(false); + readSpy.mockReturnValue(''); + homeSpy.mockReturnValue('/mock/home'); + delete process.env.NODE9_API_KEY; + Object.defineProperty(process.stdout, 'isTTY', { value: false, configurable: true }); +}); + +afterEach(() => { + vi.clearAllMocks(); + vi.unstubAllGlobals(); +}); + +// ── getGlobalSettings ────────────────────────────────────────────────────── + +describe('getGlobalSettings', () => { + it('returns autoStartDaemon:true when no global config exists', () => { + const s = getGlobalSettings(); + expect(s.autoStartDaemon).toBe(true); + }); + + it('returns autoStartDaemon:true when config has no setting', () => { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => + String(p) === globalPath ? 
JSON.stringify({ settings: { mode: 'standard' } }) : '' + ); + expect(getGlobalSettings().autoStartDaemon).toBe(true); + }); + + it('returns autoStartDaemon:false when explicitly set to false', () => { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => + String(p) === globalPath + ? JSON.stringify({ settings: { mode: 'standard', autoStartDaemon: false } }) + : '' + ); + expect(getGlobalSettings().autoStartDaemon).toBe(false); + }); + + it('returns autoStartDaemon:true when explicitly set to true', () => { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => + String(p) === globalPath + ? JSON.stringify({ settings: { mode: 'standard', autoStartDaemon: true } }) + : '' + ); + expect(getGlobalSettings().autoStartDaemon).toBe(true); + }); + + it('returns defaults when config file is malformed JSON', () => { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => (String(p) === globalPath ? 
'not json' : '')); + const s = getGlobalSettings(); + expect(s.autoStartDaemon).toBe(true); + expect(s.mode).toBe('standard'); + }); +}); + +// ── Smart runner policy (shell tool) ────────────────────────────────────── + +describe('smart runner β€” shell command policy', () => { + it('blocks dangerous shell commands', async () => { + // Use a non-sandbox path β€” /tmp/** is in sandboxPaths and would be auto-allowed + const result = await evaluatePolicy('shell', { command: 'rm -rf /home/user/data' }); + expect(result.decision).toBe('review'); + }); + + it('allows safe shell commands', async () => { + const result = await evaluatePolicy('shell', { command: 'ls -la' }); + expect(result.decision).toBe('allow'); + }); + + it('blocks when command contains dangerous word in path', async () => { + const result = await evaluatePolicy('shell', { command: 'find . -delete' }); + expect(result.decision).toBe('review'); + }); + + it('allows npm install (no dangerous tokens)', async () => { + const result = await evaluatePolicy('shell', { command: 'npm install express' }); + expect(result.decision).toBe('allow'); + }); +}); + +// ── autoStartDaemon: false β†’ noApprovalMechanism (no daemon auto-start) ─── + +describe('autoStartDaemon: false β€” blocks without daemon when no TTY', () => { + it('returns noApprovalMechanism when no API key, no daemon, no TTY', async () => { + // Disable native so racePromises is empty β†’ noApprovalMechanism + mockNoNativeConfig(); + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + expect(result.noApprovalMechanism).toBe(true); + }); + + it('approves via persistent allow decision (deterministic, no HITL)', async () => { + // Persistent decisions are checked before the race engine β€” no popup, no TTY needed + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) 
=== decisionsPath ? JSON.stringify({ delete_user: 'allow' }) : '' + ); + + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(true); + }); + + it('blocks via persistent deny decision (deterministic, no HITL)', async () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ delete_user: 'deny' }) : '' + ); + + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + }); +}); + +// ── Daemon abandon β†’ fallthrough ─────────────────────────────────────────── + +describe('daemon abandon fallthrough', () => { + it('returns noApprovalMechanism when daemon is not running and no other channels', async () => { + // All approvers disabled except browser; daemon is not running β†’ empty race β†’ noApprovalMechanism + mockNoNativeConfig(); + // No daemon PID file β†’ isDaemonRunning() = false β†’ RACER 3 skipped + // No TTY, no allowTerminalFallback β†’ RACER 4 skipped + // racePromises.length === 0 β†’ noApprovalMechanism: true + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + expect(result.noApprovalMechanism).toBe(true); + }); + + it('returns approved:false when daemon denies (deterministic daemon response)', async () => { + // Set up a live daemon that deterministically denies β€” no HITL needed + const pidPath = path.join('/mock/home', '.node9', 'daemon.pid'); + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => [pidPath, globalPath].includes(String(p))); + readSpy.mockImplementation((p) => { + if (String(p) === pidPath) return JSON.stringify({ pid: process.pid, port: 7391 }); + if (String(p) === globalPath) + return JSON.stringify({ settings: { approvers: { native: false } } }); + return ''; + }); + + 
vi.stubGlobal( + 'fetch', + vi.fn().mockImplementation((url: string) => { + if (String(url).endsWith('/check')) { + return Promise.resolve({ ok: true, json: () => Promise.resolve({ id: 'test-id' }) }); + } + // Daemon returns deny β€” deterministic outcome, no interaction required + return Promise.resolve({ + ok: true, + json: () => Promise.resolve({ decision: 'deny' }), + }); + }) + ); + + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + }); +}); + +// ── isDaemonRunning: stale PID file ─────────────────────────────────────── -describe('CLI Smart Runner', () => { - it('identifies that non-internal commands should be proxied', () => { - // This is a placeholder for a more complex integration test. - // In a real scenario, we'd use 'execa' to run the built binary and check stdout. - expect(true).toBe(true); +describe('isDaemonRunning β€” stale PID file', () => { + it('returns false when PID file exists but process is dead', () => { + const pidPath = path.join('/mock/home', '.node9', 'daemon.pid'); + existsSpy.mockImplementation((p) => String(p) === pidPath); + // Use PID 999999 which is virtually guaranteed to not exist + readSpy.mockImplementation((p) => + String(p) === pidPath ? JSON.stringify({ pid: 999999, port: 7391 }) : '' + ); + expect(isDaemonRunning()).toBe(false); }); }); diff --git a/src/__tests__/core.test.ts b/src/__tests__/core.test.ts index b3e4f67..4a663f6 100644 --- a/src/__tests__/core.test.ts +++ b/src/__tests__/core.test.ts @@ -2,30 +2,96 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import fs from 'fs'; import os from 'os'; import path from 'path'; -import { authorizeAction, evaluatePolicy, authorizeHeadless, _resetConfigCache } from '../core.js'; +// 1. Lock down the testing environment globally so it survives between tests. +process.env.NODE9_TESTING = '1'; +process.env.VITEST = 'true'; +process.env.NODE_ENV = 'test'; + +// 2. 
Mock Terminal prompts vi.mock('@inquirer/prompts', () => ({ confirm: vi.fn() })); +// 3. Mock Native UI module +vi.mock('../ui/native', () => ({ + askNativePopup: vi.fn().mockResolvedValue('deny'), + sendDesktopNotification: vi.fn(), +})); + +// 4. THE ULTIMATE KILL-SWITCH: Mock Node.js OS commands +// If the real UI module accidentally loads, this physically prevents it from opening a window. +vi.mock('child_process', () => ({ + spawn: vi.fn().mockReturnValue({ + unref: vi.fn(), + stdout: { on: vi.fn() }, + on: vi.fn((event, cb) => { + // Instantly simulate the user clicking "Block" so the test moves on without a popup + if (event === 'close') cb(1); + }), + }), +})); + +// 5. NOW we import core AFTER the mocks are registered! +import { + authorizeAction, + evaluatePolicy, + authorizeHeadless, + _resetConfigCache, + getPersistentDecision, + isDaemonRunning, +} from '../core.js'; + // Global spies const existsSpy = vi.spyOn(fs, 'existsSync'); const readSpy = vi.spyOn(fs, 'readFileSync'); -const writeSpy = vi.spyOn(fs, 'writeFileSync'); -const mkdirSpy = vi.spyOn(fs, 'mkdirSync'); +vi.spyOn(fs, 'writeFileSync').mockImplementation(() => undefined); +vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined); const homeSpy = vi.spyOn(os, 'homedir'); async function getConfirm() { return vi.mocked((await import('@inquirer/prompts')).confirm); } +// ── Config mock helpers ─────────────────────────────────────────────────────── + +function mockProjectConfig(config: object) { + const projectPath = path.join(process.cwd(), 'node9.config.json'); + existsSpy.mockImplementation((p) => String(p) === projectPath); + readSpy.mockImplementation((p) => (String(p) === projectPath ? JSON.stringify(config) : '')); +} + +function mockGlobalConfig(config: object) { + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => String(p) === globalPath); + readSpy.mockImplementation((p) => (String(p) === globalPath ? 
JSON.stringify(config) : '')); +} + +function mockBothConfigs(projectConfig: object, globalConfig: object) { + const projectPath = path.join(process.cwd(), 'node9.config.json'); + const globalPath = path.join('/mock/home', '.node9', 'config.json'); + existsSpy.mockImplementation((p) => [projectPath, globalPath].includes(String(p))); + readSpy.mockImplementation((p) => { + if (String(p) === projectPath) return JSON.stringify(projectConfig); + if (String(p) === globalPath) return JSON.stringify(globalConfig); + return ''; + }); +} + +/** Config that disables the native approver so racePromises can be empty + * and noApprovalMechanism tests work correctly. */ +function mockNoNativeConfig(extra?: object) { + mockGlobalConfig({ + settings: { approvers: { native: false }, ...(extra as Record) }, + }); +} + +// ── Lifecycle ───────────────────────────────────────────────────────────────── + beforeEach(() => { _resetConfigCache(); existsSpy.mockReturnValue(false); readSpy.mockReturnValue(''); - writeSpy.mockImplementation(() => undefined); - mkdirSpy.mockImplementation(() => undefined); homeSpy.mockReturnValue('/mock/home'); - - // Default headless + delete process.env.NODE9_API_KEY; Object.defineProperty(process.stdout, 'isTTY', { value: false, configurable: true }); }); @@ -33,70 +99,422 @@ afterEach(() => { vi.clearAllMocks(); }); -describe('authorizeAction', () => { - it('returns true for safe tool calls', async () => { +// ── Ignored tool patterns ───────────────────────────────────────────────────── + +describe('ignored tool patterns', () => { + it.each([ + 'list_users', + 'list_s3_buckets', + 'get_config', + 'get_user_by_id', + 'read_file', + 'read_object', + 'describe_table', + 'describe_instance', + ])('allows "%s" without prompting', async (tool) => { const confirm = await getConfirm(); - expect(await authorizeAction('list_users', {})).toBe(true); + expect(await authorizeAction(tool, {})).toBe(true); expect(confirm).not.toHaveBeenCalled(); }); +}); - it('prompts 
user for dangerous actions when no API key is configured', async () => { - const confirm = await getConfirm(); - confirm.mockResolvedValue(true); - Object.defineProperty(process.stdout, 'isTTY', { value: true, configurable: true }); +// ── Standard mode β€” safe tools ──────────────────────────────────────────────── + +describe('standard mode β€” safe tools', () => { + it.each(['create_user', 'send_notification', 'invoke_lambda', 'start_job'])( + 'allows "%s" without prompting', + async (tool) => { + const confirm = await getConfirm(); + expect(await authorizeAction(tool, {})).toBe(true); + expect(confirm).not.toHaveBeenCalled(); + } + ); +}); - expect(await authorizeAction('delete_user', { id: 123 })).toBe(true); - expect(confirm).toHaveBeenCalled(); +// ── Standard mode β€” dangerous word detection ────────────────────────────────── + +describe('standard mode β€” dangerous word detection', () => { + // Use evaluatePolicy directly β€” no HITL, purely deterministic policy check + it.each([ + 'delete_user', + 'drop_table', + 'remove_file', + 'terminate_instance', + 'refund_payment', + 'write_record', + 'update_schema', + 'destroy_cluster', + 'aws.rds.rm_database', + 'purge_queue', + 'format_disk', + ])('evaluatePolicy flags "%s" as review (dangerous word match)', async (tool) => { + expect((await evaluatePolicy(tool)).decision).toBe('review'); }); - it('returns false when user denies terminal approval', async () => { - const confirm = await getConfirm(); - confirm.mockResolvedValue(false); - Object.defineProperty(process.stdout, 'isTTY', { value: true, configurable: true }); + it('dangerous word match is case-insensitive', async () => { + expect((await evaluatePolicy('DELETE_USER')).decision).toBe('review'); + }); +}); + +// ── Persistent decision approval β€” approve / deny ───────────────────────────── + +describe('persistent decision approval', () => { + // Persistent decisions are file-based and deterministic β€” no HITL required + function 
setPersistentDecision(toolName: string, decision: 'allow' | 'deny') { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ [toolName]: decision }) : '' + ); + } + + it('returns true when persistent decision is allow', async () => { + setPersistentDecision('delete_user', 'allow'); + expect(await authorizeAction('delete_user', {})).toBe(true); + }); - expect(await authorizeAction('drop_table', { name: 'users' })).toBe(false); + it('returns false when persistent decision is deny', async () => { + setPersistentDecision('delete_user', 'deny'); + expect(await authorizeAction('delete_user', {})).toBe(false); }); }); -describe('evaluatePolicy', () => { - it.each(['list_users', 'get_config', 'read_file', 'describe_table'])( - 'returns "allow" for ignored tool "%s"', - async (tool) => { - expect(await evaluatePolicy(tool)).toBe('allow'); - } - ); +// ── Bash tool β€” shell command interception ──────────────────────────────────── - it.each(['delete_user', 'drop_table', 'rm_data', 'destroy_cluster'])( - 'returns "review" for dangerous tool "%s"', +describe('Bash tool β€” shell command interception', () => { + it.each([ + { cmd: 'rm /home/user/deleteme.txt', desc: 'rm command' }, + { cmd: 'rm -rf /', desc: 'rm -rf' }, + { cmd: 'sudo rm -rf /home/user', desc: 'sudo rm' }, + { cmd: 'rmdir /var/log/mydir', desc: 'rmdir command' }, + { cmd: '/usr/bin/rm file.txt', desc: 'absolute path to rm' }, + { cmd: 'find . 
-delete', desc: 'find -delete flag' }, + { cmd: 'npm update', desc: 'npm update' }, + { cmd: 'apt-get purge vim', desc: 'apt-get purge' }, + ])('blocks Bash when command is "$desc"', async ({ cmd }) => { + expect((await evaluatePolicy('Bash', { command: cmd })).decision).toBe('review'); + }); + + it.each([ + { cmd: 'ls -la', desc: 'ls' }, + { cmd: 'cat /etc/hosts', desc: 'cat' }, + { cmd: 'git status', desc: 'git status' }, + { cmd: 'npm install', desc: 'npm install' }, + { cmd: 'node --version', desc: 'node --version' }, + ])('allows Bash when command is "$desc"', async ({ cmd }) => { + expect((await evaluatePolicy('Bash', { command: cmd })).decision).toBe('allow'); + }); + + it('authorizeHeadless blocks Bash rm when no approval mechanism', async () => { + // Disable native approver so racePromises is empty β†’ noApprovalMechanism + mockNoNativeConfig(); + const result = await authorizeHeadless('Bash', { command: 'rm /home/user/data.txt' }); + expect(result.approved).toBe(false); + expect(result.noApprovalMechanism).toBe(true); + }); + + it('authorizeHeadless allows Bash ls', async () => { + const result = await authorizeHeadless('Bash', { command: 'ls -la' }); + expect(result.approved).toBe(true); + }); +}); + +// ── False-positive regression ───────────────────────────────────────────────── + +describe('false-positive regression β€” rm substring', () => { + it.each(['confirm_action', 'check_permissions', 'perform_search'])( + 'does not block "%s"', async (tool) => { - expect(await evaluatePolicy(tool)).toBe('review'); + expect((await evaluatePolicy(tool)).decision).toBe('allow'); } ); +}); - it('respects project-level node9.config.json', async () => { - const projectPath = path.join(process.cwd(), 'node9.config.json'); - existsSpy.mockImplementation((p) => String(p) === projectPath); - readSpy.mockImplementation((p) => { - if (String(p) === projectPath) { - return JSON.stringify({ policy: { dangerousWords: ['deploy'] } }); - } - return ''; +// ── Strict mode 
─────────────────────────────────────────────────────────────── + +describe('strict mode', () => { + beforeEach(() => { + mockProjectConfig({ + settings: { mode: 'strict' }, + policy: { dangerousWords: [], ignoredTools: ['list_*'] }, + environments: {}, }); + }); - expect(await evaluatePolicy('deploy_app')).toBe('review'); - expect(await evaluatePolicy('delete_user')).toBe('allow'); + it('intercepts non-dangerous tools that would pass in standard mode', async () => { + expect((await evaluatePolicy('create_user')).decision).toBe('review'); + }); + + it('still allows ignored tools', async () => { + expect((await evaluatePolicy('list_users')).decision).toBe('allow'); }); }); +// ── Environment config ──────────────────────────────────────────────────────── + +describe('environment config', () => { + it('strict mode blocks all non-dangerous tools by default', async () => { + process.env.NODE_ENV = 'development'; + mockProjectConfig({ + settings: { mode: 'strict' }, + policy: { dangerousWords: [], ignoredTools: [] }, + environments: {}, + }); + // In strict mode every tool that isn't ignored requires approval + expect((await evaluatePolicy('create_user')).decision).toBe('review'); + }); + + it('standard mode allows non-dangerous tools regardless of environment', async () => { + process.env.NODE_ENV = 'production'; + mockProjectConfig({ + settings: { mode: 'standard' }, + policy: { dangerousWords: ['delete'], ignoredTools: [] }, + environments: {}, + }); + // delete_user is dangerous in any mode β€” confirm standard mode still blocks it + expect((await evaluatePolicy('delete_user')).decision).toBe('review'); + // Safe tools are always allowed in standard mode + expect((await evaluatePolicy('invoke_lambda')).decision).toBe('allow'); + }); +}); + +// ── Custom policy ───────────────────────────────────────────────────────────── + +describe('custom policy', () => { + it('respects user-defined dangerousWords', async () => { + mockProjectConfig({ + settings: { mode: 
'standard' }, + policy: { dangerousWords: ['deploy'], ignoredTools: [] }, + environments: {}, + }); + expect((await evaluatePolicy('deploy_to_prod')).decision).toBe('review'); + // Note: dangerousWords are additive β€” defaults (delete, rm, etc.) are still active. + // Use a word that's not in the default list to verify only custom words are 'allow'. + expect((await evaluatePolicy('invoke_lambda')).decision).toBe('allow'); + }); + + it('respects user-defined ignoredTools', async () => { + mockProjectConfig({ + settings: { mode: 'standard' }, + policy: { dangerousWords: ['delete'], ignoredTools: ['delete_*'] }, + environments: {}, + }); + expect((await evaluatePolicy('delete_temp_files')).decision).toBe('allow'); + }); +}); + +// ── Global config ───────────────────────────────────────────────────────────── + +describe('global config (~/.node9/config.json)', () => { + it('is used when no project config exists', async () => { + mockGlobalConfig({ + settings: { mode: 'standard' }, + policy: { dangerousWords: ['nuke'], ignoredTools: [] }, + environments: {}, + }); + expect((await evaluatePolicy('nuke_everything')).decision).toBe('review'); + // dangerousWords are additive β€” use a word absent from both default and custom lists + expect((await evaluatePolicy('invoke_lambda')).decision).toBe('allow'); + }); + + it('project config settings take precedence over global config settings', async () => { + mockBothConfigs( + // project: standard mode (overrides global strict) + { + settings: { mode: 'standard' }, + policy: { dangerousWords: [], ignoredTools: [] }, + environments: {}, + }, + // global: strict mode + { + settings: { mode: 'strict' }, + policy: { dangerousWords: [], ignoredTools: [] }, + environments: {}, + } + ); + // Project's standard mode wins β€” create_user is safe in standard mode + expect((await evaluatePolicy('create_user')).decision).toBe('allow'); + }); + + it('falls back to hardcoded defaults when neither config exists', async () => { + // existsSpy 
returns false for all paths (set in beforeEach) + expect((await evaluatePolicy('delete_user')).decision).toBe('review'); + expect((await evaluatePolicy('list_users')).decision).toBe('allow'); + }); +}); + +// ── authorizeHeadless β€” full coverage ───────────────────────────────────────── + describe('authorizeHeadless', () => { - it('returns approved:true for safe actions', async () => { - const result = await authorizeHeadless('list_users', {}); - expect(result).toEqual({ approved: true }); + it('returns approved:true for safe tools', async () => { + expect(await authorizeHeadless('list_users', {})).toEqual({ approved: true }); + }); + + it('returns approved:false with noApprovalMechanism when no API key', async () => { + // Disable native approver so racePromises is empty β†’ noApprovalMechanism + mockNoNativeConfig(); + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + expect(result.noApprovalMechanism).toBe(true); }); - it('returns approved:false with a helpful reason when no API key is configured', async () => { + it('calls cloud API and returns approved:true on approval', async () => { + // approvers.cloud must be true for cloud enforcement to activate; disable native so cloud wins + mockGlobalConfig({ + settings: { slackEnabled: true, approvers: { native: false, cloud: true } }, + }); + process.env.NODE9_API_KEY = 'test-key'; + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + json: async () => ({ approved: true, message: 'Approved via Slack' }), + }) + ); + const result = await authorizeHeadless('delete_user', { id: 1 }); + expect(result.approved).toBe(true); + }); + + it('returns approved:false when cloud API denies', async () => { + mockGlobalConfig({ + settings: { slackEnabled: true, approvers: { native: false, cloud: true } }, + }); + process.env.NODE9_API_KEY = 'test-key'; + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + json: async () => ({ approved: false }), 
+ }) + ); + const result = await authorizeHeadless('delete_user', { id: 1 }); + expect(result.approved).toBe(false); + }); + + it('returns approved:false when cloud API call fails', async () => { + mockNoNativeConfig(); + process.env.NODE9_API_KEY = 'test-key'; + vi.stubGlobal('fetch', vi.fn().mockRejectedValue(new Error('Network error'))); const result = await authorizeHeadless('delete_user', {}); expect(result.approved).toBe(false); - expect(result.reason).toMatch(/node9 login/i); + }); + + it('does NOT prompt on TTY β€” headless means headless', async () => { + mockNoNativeConfig(); + Object.defineProperty(process.stdout, 'isTTY', { value: true, configurable: true }); + const confirm = await getConfirm(); + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + expect(confirm).not.toHaveBeenCalled(); + }); +}); + +// ── evaluatePolicy β€” project config ────────────────────────────────────────── + +describe('evaluatePolicy β€” project config', () => { + it('returns "review" for dangerous tool', async () => { + expect((await evaluatePolicy('delete_user')).decision).toBe('review'); + }); + + it('returns "allow" for safe tool in standard mode', async () => { + expect((await evaluatePolicy('create_user')).decision).toBe('allow'); + }); + + it('respects project-level dangerousWords override', async () => { + mockProjectConfig({ + settings: { mode: 'standard' }, + policy: { dangerousWords: ['deploy'], ignoredTools: [] }, + environments: {}, + }); + expect((await evaluatePolicy('deploy_app')).decision).toBe('review'); + // dangerousWords are additive β€” defaults still apply, use a clearly safe word + expect((await evaluatePolicy('invoke_lambda')).decision).toBe('allow'); + }); +}); + +// ── Persistent decisions ────────────────────────────────────────────────────── + +describe('getPersistentDecision', () => { + it('returns null when decisions file does not exist', () => { + // existsSpy already returns false in beforeEach + 
expect(getPersistentDecision('delete_user')).toBeNull(); + }); + + it('returns "allow" when tool is set to always allow', () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ delete_user: 'allow' }) : '' + ); + expect(getPersistentDecision('delete_user')).toBe('allow'); + }); + + it('returns "deny" when tool is set to always deny', () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ delete_user: 'deny' }) : '' + ); + expect(getPersistentDecision('delete_user')).toBe('deny'); + }); + + it('returns null for an unrecognised value', () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ delete_user: 'maybe' }) : '' + ); + expect(getPersistentDecision('delete_user')).toBeNull(); + }); +}); + +describe('authorizeHeadless β€” persistent decisions', () => { + it('approves without API when persistent decision is "allow"', async () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? 
JSON.stringify({ delete_user: 'allow' }) : '' + ); + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(true); + }); + + it('blocks without API when persistent decision is "deny"', async () => { + const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ delete_user: 'deny' }) : '' + ); + const result = await authorizeHeadless('delete_user', {}); + expect(result.approved).toBe(false); + expect(result.reason).toMatch(/always deny/i); + }); +}); + +// ── isDaemonRunning ─────────────────────────────────────────────────────────── + +describe('isDaemonRunning', () => { + it('returns false when PID file does not exist', () => { + // existsSpy returns false (set in beforeEach) + expect(isDaemonRunning()).toBe(false); + }); + + it('returns false when PID file has wrong port', () => { + const pidPath = path.join('/mock/home', '.node9', 'daemon.pid'); + existsSpy.mockImplementation((p) => String(p) === pidPath); + readSpy.mockImplementation((p) => + String(p) === pidPath ? JSON.stringify({ pid: process.pid, port: 9999 }) : '' + ); + expect(isDaemonRunning()).toBe(false); + }); + + it('returns true when PID exists and process is alive', () => { + const pidPath = path.join('/mock/home', '.node9', 'daemon.pid'); + existsSpy.mockImplementation((p) => String(p) === pidPath); + readSpy.mockImplementation((p) => + // Use current process PID so kill(pid, 0) succeeds + String(p) === pidPath ? 
JSON.stringify({ pid: process.pid, port: 7391 }) : '' + ); + expect(isDaemonRunning()).toBe(true); }); }); diff --git a/src/__tests__/gemini_integration.test.ts b/src/__tests__/gemini_integration.test.ts index e453fd3..262fbdb 100644 --- a/src/__tests__/gemini_integration.test.ts +++ b/src/__tests__/gemini_integration.test.ts @@ -1,4 +1,14 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest'; +// 1. MUST be the very first lines of the file +import { vi } from 'vitest'; + +// 2. Add '.js' to the path and use 'mockResolvedValue' (since it's an async function now) +vi.mock('../ui/native.js', () => ({ + askNativePopup: vi.fn().mockResolvedValue('deny'), + sendDesktopNotification: vi.fn(), +})); + +// 3. Now perform your regular imports +import { describe, it, expect, beforeEach } from 'vitest'; import fs from 'fs'; import os from 'os'; import { evaluatePolicy, authorizeHeadless, _resetConfigCache, DANGEROUS_WORDS } from '../core.js'; @@ -24,7 +34,7 @@ function mockConfig(config: MockConfig) { readSpy.mockImplementation((p) => { if (String(p) === globalPath) { return JSON.stringify({ - settings: { mode: 'standard', ...config.settings }, + settings: { mode: 'standard', approvers: { native: false }, ...config.settings }, policy: { dangerousWords: DANGEROUS_WORDS, // Use defaults! 
ignoredTools: [], @@ -54,26 +64,26 @@ describe('Gemini Integration Security', () => { it('identifies "Shell" (capital S) as a shell-executing tool', async () => { mockConfig({}); const result = await evaluatePolicy('Shell', { command: 'rm -rf /' }); - expect(result).toBe('review'); + expect(result.decision).toBe('review'); }); it('identifies "run_shell_command" as a shell-executing tool', async () => { mockConfig({}); const result = await evaluatePolicy('run_shell_command', { command: 'rm -rf /' }); - expect(result).toBe('review'); + expect(result.decision).toBe('review'); }); it('correctly parses complex shell commands inside run_shell_command', async () => { mockConfig({}); const result = await evaluatePolicy('run_shell_command', { command: 'ls && rm -rf tmp' }); - expect(result).toBe('review'); + expect(result.decision).toBe('review'); }); it('blocks dangerous commands in Gemini hooks without API key', async () => { mockConfig({}); const result = await authorizeHeadless('Shell', { command: 'rm -rf /' }); expect(result.approved).toBe(false); - expect(result.reason).toContain('Node9 blocked "Shell"'); + expect(result.noApprovalMechanism).toBe(true); }); it('allows safe shell commands in Gemini hooks', async () => { @@ -95,12 +105,35 @@ describe('Gemini Integration Security', () => { const dangerousResult = await evaluatePolicy('Database.query', { payload: { sql: 'DROP TABLE users;' }, }); - expect(dangerousResult).toBe('review'); + expect(dangerousResult.decision).toBe('review'); const safeResult = await evaluatePolicy('Database.query', { payload: { sql: 'SELECT * FROM users;' }, }); - expect(safeResult).toBe('allow'); + expect(safeResult.decision).toBe('allow'); + }); +}); + +describe('Gemini BeforeTool payload format', () => { + it('evaluates tool policy from Gemini { name, args } format', async () => { + mockConfig({}); + // Gemini sends { name, args } not { tool_name, tool_input } + const dangerous = await evaluatePolicy('Shell', { command: 'rm -rf /' }); + 
expect(dangerous.decision).toBe('review'); + }); + + it('blocks dangerous Gemini tool via name/args format', async () => { + mockConfig({}); + const result = await authorizeHeadless('Shell', { command: 'rm -rf /' }); + expect(result.approved).toBe(false); + }); + + it('allows safe Gemini read tool via name/args format', async () => { + mockConfig({ + policy: { ignoredTools: ['read_*', 'ReadFile'] }, + }); + const result = await authorizeHeadless('ReadFile', { path: '/etc/hosts' }); + expect(result.approved).toBe(true); }); }); diff --git a/src/__tests__/protect.test.ts b/src/__tests__/protect.test.ts index de57840..c821a4d 100644 --- a/src/__tests__/protect.test.ts +++ b/src/__tests__/protect.test.ts @@ -1,25 +1,41 @@ import { describe, it, expect, vi, beforeEach } from 'vitest'; import fs from 'fs'; +import os from 'os'; import { protect } from '../index.js'; import { _resetConfigCache } from '../core.js'; -vi.mock('@inquirer/prompts', () => ({ - confirm: vi.fn(), +// Fully block all HITL channels β€” tests use deterministic mechanisms only +vi.mock('@inquirer/prompts', () => ({ confirm: vi.fn() })); +vi.mock('../ui/native', () => ({ + askNativePopup: vi.fn().mockReturnValue('deny'), + sendDesktopNotification: vi.fn(), })); -vi.spyOn(fs, 'existsSync').mockReturnValue(false); -vi.spyOn(fs, 'readFileSync'); +const existsSpy = vi.spyOn(fs, 'existsSync').mockReturnValue(false); +const readSpy = vi.spyOn(fs, 'readFileSync').mockReturnValue(''); +vi.spyOn(os, 'homedir').mockReturnValue('/mock/home'); beforeEach(() => { _resetConfigCache(); delete process.env.NODE9_API_KEY; - Object.defineProperty(process.stdout, 'isTTY', { value: true, configurable: true }); + existsSpy.mockReturnValue(false); + readSpy.mockReturnValue(''); + Object.defineProperty(process.stdout, 'isTTY', { value: false, configurable: true }); }); +/** Grant approval for a tool via a persistent decision file (no HITL needed). 
*/ +function setPersistentDecision(toolName: string, decision: 'allow' | 'deny') { + const decisionsPath = '/mock/home/.node9/decisions.json'; + existsSpy.mockImplementation((p) => String(p) === decisionsPath); + readSpy.mockImplementation((p) => + String(p) === decisionsPath ? JSON.stringify({ [toolName]: decision }) : '' + ); +} + describe('protect()', () => { it('calls the wrapped function and returns its result when approved', async () => { - const { confirm } = await import('@inquirer/prompts'); - vi.mocked(confirm).mockResolvedValue(true); + // Approval via persistent decision β€” no human interaction needed + setPersistentDecision('delete_resource', 'allow'); const fn = vi.fn().mockResolvedValue('ok'); const secured = protect('delete_resource', fn); @@ -31,8 +47,8 @@ describe('protect()', () => { }); it('throws and does NOT call the wrapped function when denied', async () => { - const { confirm } = await import('@inquirer/prompts'); - vi.mocked(confirm).mockResolvedValue(false); + // Denial via persistent decision β€” no human interaction needed + setPersistentDecision('delete_resource', 'deny'); const fn = vi.fn(); const secured = protect('delete_resource', fn); @@ -49,14 +65,14 @@ describe('protect()', () => { const result = await secured(); + // Ignored tool β€” fast-path allow with no approval channel touched expect(confirm).not.toHaveBeenCalled(); expect(fn).toHaveBeenCalledTimes(1); expect(result).toBe('data'); }); it('preserves the original function return type', async () => { - const { confirm } = await import('@inquirer/prompts'); - vi.mocked(confirm).mockResolvedValue(true); + setPersistentDecision('delete_record', 'allow'); const fn = vi.fn().mockResolvedValue({ id: 1, name: 'test' }); const secured = protect('delete_record', fn); diff --git a/src/__tests__/redactor.test.ts b/src/__tests__/redactor.test.ts index 6f86492..7b59c50 100644 --- a/src/__tests__/redactor.test.ts +++ b/src/__tests__/redactor.test.ts @@ -22,10 +22,12 @@ 
describe('redactSecrets', () => { expect(redactSecrets('password: "password_example_123"')).toContain('password: "********'); }); - it('masks generic long entropy strings', () => { + it('does NOT mask bare long strings without a secret prefix β€” avoids redacting SHAs, paths, IDs', () => { + // Pattern 3 was removed: bare long alphanumeric strings like git SHAs should NOT be redacted. + // Only strings with a recognised prefix (api_key=, token=, Authorization: Bearer) are redacted. const input = 'The hash is a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0u1v2w3x4y5z6a7b8c9d0e1f2'; const output = redactSecrets(input); - expect(output).toContain('********'); + expect(output).toBe(input); // unchanged β€” no recognised prefix }); it('does not mask short, safe words', () => { diff --git a/src/__tests__/setup.test.ts b/src/__tests__/setup.test.ts index d0817af..9d46ae0 100644 --- a/src/__tests__/setup.test.ts +++ b/src/__tests__/setup.test.ts @@ -89,7 +89,8 @@ describe('setupClaude', () => { const written = writtenTo(mcpPath); expect(written.mcpServers.github.command).toBe('node9'); - expect(written.mcpServers.github.args[0]).toBe('proxy'); + // args are the full original command parts β€” no 'proxy' indirection + expect(written.mcpServers.github.args).toEqual(['npx', '-y', 'server-github']); }); it('skips MCP wrapping when user denies', async () => { @@ -105,7 +106,7 @@ describe('setupClaude', () => { it('skips MCP servers that are already wrapped', async () => { withExistingFile(mcpPath, { - mcpServers: { github: { command: 'node9', args: ['proxy', 'npx server-github'] } }, + mcpServers: { github: { command: 'node9', args: ['npx', 'server-github'] } }, }); const confirm = await getConfirm(); @@ -194,8 +195,8 @@ describe('setupCursor', () => { expect(confirm).not.toHaveBeenCalled(); const written = writtenTo(hooksPath); expect(written.version).toBe(1); - expect(written.hooks.preToolUse[0].command).toBe('node9'); - expect(written.hooks.postToolUse[0].command).toBe('node9'); + 
expect(written.hooks.preToolUse[0].command).toBe('node9 check'); + expect(written.hooks.postToolUse[0].command).toBe('node9 log'); }); it('does not add hooks that already exist', async () => { @@ -233,7 +234,7 @@ describe('setupCursor', () => { const written = writtenTo(mcpPath); expect(written.mcpServers.brave.command).toBe('node9'); - expect(written.mcpServers.brave.args[0]).toBe('proxy'); + expect(written.mcpServers.brave.args).toEqual(['npx', 'server-brave']); }); it('skips MCP wrapping when user denies', async () => { @@ -259,6 +260,6 @@ describe('setupCursor', () => { // node9 should be appended, not replace the existing hook expect(written.hooks.preToolUse).toHaveLength(2); expect(written.hooks.preToolUse[0].command).toBe('some-other-tool'); - expect(written.hooks.preToolUse[1].command).toBe('node9'); + expect(written.hooks.preToolUse[1].command).toBe('node9 check'); }); }); diff --git a/src/cli.ts b/src/cli.ts index f040efc..eadf851 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -1,49 +1,134 @@ #!/usr/bin/env node import { Command } from 'commander'; -import { authorizeAction, authorizeHeadless, redactSecrets } from './core'; +import { + authorizeHeadless, + redactSecrets, + DANGEROUS_WORDS, + isDaemonRunning, + getCredentials, + checkPause, + pauseNode9, + resumeNode9, + getConfig, // Ensure this is exported from core.ts! 
+} from './core'; import { setupClaude, setupGemini, setupCursor } from './setup'; -import { spawn } from 'child_process'; +import { startDaemon, stopDaemon, daemonStatus, DAEMON_PORT, DAEMON_HOST } from './daemon/index'; +import { spawn, execSync } from 'child_process'; import { parseCommandString } from 'execa'; +import { execa } from 'execa'; import chalk from 'chalk'; import readline from 'readline'; import fs from 'fs'; import path from 'path'; import os from 'os'; +import { createShadowSnapshot, applyUndo, getLatestSnapshotHash } from './undo'; +import { confirm } from '@inquirer/prompts'; const { version } = JSON.parse( fs.readFileSync(path.join(__dirname, '../package.json'), 'utf-8') ) as { version: string }; +/** Parse a duration string like "15m", "1h", "30s" β†’ milliseconds, or null if invalid. */ +function parseDuration(str: string): number | null { + const m = str.trim().match(/^(\d+(?:\.\d+)?)\s*(s|m|h|d)?$/i); + if (!m) return null; + const n = parseFloat(m[1]); + switch ((m[2] ?? 
'm').toLowerCase()) { + case 's': + return Math.round(n * 1_000); + case 'm': + return Math.round(n * 60_000); + case 'h': + return Math.round(n * 3_600_000); + case 'd': + return Math.round(n * 86_400_000); + default: + return null; + } +} + function sanitize(value: string): string { // eslint-disable-next-line no-control-regex return value.replace(/[\x00-\x1F\x7F]/g, ''); } -const program = new Command(); +function openBrowserLocal() { + const url = `http://${DAEMON_HOST}:${DAEMON_PORT}/`; + try { + const opts = { stdio: 'ignore' as const }; + if (process.platform === 'darwin') execSync(`open "${url}"`, opts); + else if (process.platform === 'win32') execSync(`cmd /c start "" "${url}"`, opts); + else execSync(`xdg-open "${url}"`, opts); + } catch {} +} + +async function autoStartDaemonAndWait(): Promise { + try { + const child = spawn('node9', ['daemon'], { + detached: true, + stdio: 'ignore', + env: { ...process.env, NODE9_AUTO_STARTED: '1' }, + }); + child.unref(); + for (let i = 0; i < 20; i++) { + await new Promise((r) => setTimeout(r, 250)); + if (!isDaemonRunning()) continue; + // Verify the HTTP server is actually accepting connections, not just that + // the process is alive. isDaemonRunning() only checks the PID file, which + // could be stale (OS PID reuse) or written before the socket is fully ready. + try { + const res = await fetch('http://127.0.0.1:7391/settings', { + signal: AbortSignal.timeout(500), + }); + if (res.ok) { + // Open the browser NOW β€” before the approval request is registered β€” + // so the browser has time to connect SSE. If we wait until POST /check, + // broadcast('add') fires with sseClients.size === 0 and the request + // depends on the async openBrowser() inside the daemon, which can lose + // the race with the browser's own page-load timing. 
+ openBrowserLocal(); + return true; + } + } catch { + // HTTP not ready yet β€” keep polling + } + } + } catch {} + return false; +} +const program = new Command(); program.name('node9').description('The Sudo Command for AI Agents').version(version); -// Helper for the Proxy logic async function runProxy(targetCommand: string) { - console.log(chalk.green(`πŸš€ Node9 Proxy Active: Monitoring [${targetCommand}]`)); const commandParts = parseCommandString(targetCommand); const cmd = commandParts[0]; const args = commandParts.slice(1); - const child = spawn(cmd, args, { - stdio: ['pipe', 'pipe', 'inherit'], + let executable = cmd; + try { + const { stdout } = await execa('which', [cmd]); + if (stdout) executable = stdout.trim(); + } catch {} + + console.log(chalk.green(`πŸš€ Node9 Proxy Active: Monitoring [${targetCommand}]`)); + + // Spawn the MCP Server / Shell command + const child = spawn(executable, args, { + stdio: ['pipe', 'pipe', 'inherit'], // We control STDIN and STDOUT shell: true, + env: { ...process.env, FORCE_COLOR: '1' }, }); - const rl = readline.createInterface({ input: process.stdin, terminal: true }); - rl.on('line', (line) => { - child.stdin.write(line + '\n'); - }); + // ── INTERCEPT INPUT (Agent -> Server) ── + // This is where 'tools/call' requests come from + const agentIn = readline.createInterface({ input: process.stdin, terminal: false }); - const childOut = readline.createInterface({ input: child.stdout, terminal: false }); - childOut.on('line', async (line) => { + agentIn.on('line', async (line) => { try { const message = JSON.parse(line); + + // If the Agent is trying to call a tool if ( message.method === 'call_tool' || message.method === 'tools/call' || @@ -51,22 +136,38 @@ async function runProxy(targetCommand: string) { ) { const name = message.params?.name || message.params?.tool_name || 'unknown'; const toolArgs = message.params?.arguments || message.params?.tool_input || {}; - const approved = await authorizeAction(sanitize(name), 
toolArgs); - if (!approved) { + + // Use our Race Engine to authorize + const result = await authorizeHeadless(sanitize(name), toolArgs, true, { + agent: 'Proxy/MCP', + }); + + if (!result.approved) { + // If denied, send the error back to the Agent and DO NOT forward to the server const errorResponse = { jsonrpc: '2.0', id: message.id, - error: { code: -32000, message: 'Node9: Action denied.' }, + error: { + code: -32000, + message: `Node9: Action denied. ${result.reason || ''}`, + }, }; - child.stdin.write(JSON.stringify(errorResponse) + '\n'); - return; + process.stdout.write(JSON.stringify(errorResponse) + '\n'); + return; // Stop the command here! } } - process.stdout.write(line + '\n'); + // If approved or not a tool call, forward it to the server's STDIN + child.stdin.write(line + '\n'); } catch { - process.stdout.write(line + '\n'); + // If it's not JSON (raw shell usage), just forward it + child.stdin.write(line + '\n'); } }); + + // ── FORWARD OUTPUT (Server -> Agent) ── + // We just pass the server's responses back to the agent as-is + child.stdout.pipe(process.stdout); + child.on('exit', (code) => process.exit(code || 0)); } @@ -74,39 +175,104 @@ async function runProxy(targetCommand: string) { program .command('login') .argument('') - .action((apiKey) => { + .option('--local', 'Save key for audit/logging only β€” local config still controls all decisions') + .option('--profile ', 'Save as a named profile (default: "default")') + .action((apiKey, options: { local?: boolean; profile?: string }) => { + const DEFAULT_API_URL = 'https://api.node9.ai/api/v1/intercept'; const credPath = path.join(os.homedir(), '.node9', 'credentials.json'); if (!fs.existsSync(path.dirname(credPath))) fs.mkdirSync(path.dirname(credPath), { recursive: true }); - fs.writeFileSync( - credPath, - JSON.stringify({ apiKey, apiUrl: 'https://api.node9.ai/api/v1/intercept' }, null, 2) - ); - console.log(chalk.green(`βœ… Logged in.`)); + + const profileName = options.profile || 
'default'; + let existingCreds: Record = {}; + try { + if (fs.existsSync(credPath)) { + const raw = JSON.parse(fs.readFileSync(credPath, 'utf-8')) as Record; + if (raw.apiKey) { + existingCreds = { + default: { apiKey: raw.apiKey, apiUrl: raw.apiUrl || DEFAULT_API_URL }, + }; + } else { + existingCreds = raw; + } + } + } catch {} + + existingCreds[profileName] = { apiKey, apiUrl: DEFAULT_API_URL }; + fs.writeFileSync(credPath, JSON.stringify(existingCreds, null, 2), { mode: 0o600 }); + + if (profileName === 'default') { + const configPath = path.join(os.homedir(), '.node9', 'config.json'); + let config: Record = {}; + try { + if (fs.existsSync(configPath)) + config = JSON.parse(fs.readFileSync(configPath, 'utf-8')) as Record; + } catch {} + if (!config.settings || typeof config.settings !== 'object') config.settings = {}; + const s = config.settings as Record; + const approvers = (s.approvers as Record) || { + native: true, + browser: true, + cloud: true, + terminal: true, + }; + approvers.cloud = !options.local; + s.approvers = approvers; + if (!fs.existsSync(path.dirname(configPath))) + fs.mkdirSync(path.dirname(configPath), { recursive: true }); + fs.writeFileSync(configPath, JSON.stringify(config, null, 2), { mode: 0o600 }); + } + + if (options.profile && profileName !== 'default') { + console.log(chalk.green(`βœ… Profile "${profileName}" saved`)); + console.log(chalk.gray(` Switch to it per-session: NODE9_PROFILE=${profileName} claude`)); + } else if (options.local) { + console.log(chalk.green(`βœ… Privacy mode πŸ›‘οΈ`)); + console.log(chalk.gray(` All decisions stay on this machine.`)); + } else { + console.log(chalk.green(`βœ… Logged in β€” agent mode`)); + console.log(chalk.gray(` Team policy enforced for all calls via Node9 cloud.`)); + } }); // 2. 
ADDTO program .command('addto') - .argument('') - .action(async (target) => { + .description('Integrate Node9 with an AI agent') + .addHelpText('after', '\n Supported targets: claude gemini cursor') + .argument('', 'The agent to protect: claude | gemini | cursor') + .action(async (target: string) => { if (target === 'gemini') return await setupGemini(); if (target === 'claude') return await setupClaude(); if (target === 'cursor') return await setupCursor(); + console.error(chalk.red(`Unknown target: "${target}". Supported: claude, gemini, cursor`)); + process.exit(1); }); -import { DANGEROUS_WORDS } from './core'; - -// 3. INIT +// 3. INIT (Upgraded with Enterprise Schema) program .command('init') - .description('Create ~/.node9/config.json with default policy') - .action(() => { + .description('Create ~/.node9/config.json with default policy (safe to run multiple times)') + .option('--force', 'Overwrite existing config') + .action((options) => { const configPath = path.join(os.homedir(), '.node9', 'config.json'); + + if (fs.existsSync(configPath) && !options.force) { + console.log(chalk.yellow(`ℹ️ Global config already exists: ${configPath}`)); + console.log(chalk.gray(` Run with --force to overwrite.`)); + return; + } const defaultConfig = { version: '1.0', - settings: { mode: 'standard' }, + settings: { + mode: 'standard', + autoStartDaemon: true, + enableUndo: true, + enableHookLogDebug: false, + approvers: { native: true, browser: true, cloud: true, terminal: true }, + }, policy: { + sandboxPaths: ['/tmp/**', '**/sandbox/**', '**/test-results/**'], dangerousWords: DANGEROUS_WORDS, ignoredTools: [ 'list_*', @@ -116,29 +282,39 @@ program 'read', 'write', 'edit', - 'multiedit', 'glob', 'grep', 'ls', 'notebookread', 'notebookedit', - 'todoread', - 'todowrite', 'webfetch', 'websearch', 'exitplanmode', 'askuserquestion', + 'agent', + 'task*', ], toolInspection: { bash: 'command', shell: 'command', run_shell_command: 'command', 'terminal.execute': 'command', + 
'postgres:query': 'sql', }, rules: [ { action: 'rm', - allowPaths: ['**/node_modules/**', 'dist/**', 'build/**', '.DS_Store'], + allowPaths: [ + '**/node_modules/**', + 'dist/**', + 'build/**', + '.next/**', + 'coverage/**', + '.cache/**', + 'tmp/**', + 'temp/**', + '.DS_Store', + ], }, ], }, @@ -150,52 +326,320 @@ program console.log(chalk.gray(` Edit this file to add custom tool inspection or security rules.`)); }); -// 4. CHECK (Internal Hook) +// 4. STATUS (Upgraded to show Waterfall & Undo status) +program + .command('status') + .description('Show current Node9 mode, policy source, and persistent decisions') + .action(() => { + const creds = getCredentials(); + const daemonRunning = isDaemonRunning(); + + // Grab the fully resolved waterfall config! + const mergedConfig = getConfig(); + const settings = mergedConfig.settings; + + console.log(''); + + // ── Policy authority ──────────────────────────────────────────────────── + if (creds && settings.approvers.cloud) { + console.log(chalk.green(' ● Agent mode') + chalk.gray(' β€” cloud team policy enforced')); + } else if (creds && !settings.approvers.cloud) { + console.log( + chalk.blue(' ● Privacy mode πŸ›‘οΈ') + chalk.gray(' β€” all decisions stay on this machine') + ); + } else { + console.log( + chalk.yellow(' β—‹ Privacy mode πŸ›‘οΈ') + chalk.gray(' β€” no API key (Local rules only)') + ); + } + + // ── Daemon & Architecture ──────────────────────────────────────────────── + console.log(''); + if (daemonRunning) { + console.log( + chalk.green(' ● Daemon running') + chalk.gray(` β†’ http://127.0.0.1:${DAEMON_PORT}/`) + ); + } else { + console.log(chalk.gray(' β—‹ Daemon stopped')); + } + + if (settings.enableUndo) { + console.log( + chalk.magenta(' ● Undo Engine') + + chalk.gray(` β†’ Auto-snapshotting Git repos on AI change`) + ); + } + + // ── Configuration State ────────────────────────────────────────────────── + console.log(''); + const modeLabel = + settings.mode === 'audit' + ? 
chalk.blue('audit') + : settings.mode === 'strict' + ? chalk.red('strict') + : chalk.white('standard'); + console.log(` Mode: ${modeLabel}`); + + const projectConfig = path.join(process.cwd(), 'node9.config.json'); + const globalConfig = path.join(os.homedir(), '.node9', 'config.json'); + console.log( + ` Local: ${fs.existsSync(projectConfig) ? chalk.green('Active (node9.config.json)') : chalk.gray('Not present')}` + ); + console.log( + ` Global: ${fs.existsSync(globalConfig) ? chalk.green('Active (~/.node9/config.json)') : chalk.gray('Not present')}` + ); + + if (mergedConfig.policy.sandboxPaths.length > 0) { + console.log( + ` Sandbox: ${chalk.green(`${mergedConfig.policy.sandboxPaths.length} safe zones active`)}` + ); + } + + // ── Pause state ────────────────────────────────────────────────────────── + const pauseState = checkPause(); + if (pauseState.paused) { + const expiresAt = pauseState.expiresAt + ? new Date(pauseState.expiresAt).toLocaleTimeString() + : 'indefinitely'; + console.log(''); + console.log( + chalk.yellow(` ⏸ PAUSED until ${expiresAt}`) + chalk.gray(' β€” all tool calls allowed') + ); + } + + console.log(''); + }); + +// 5. DAEMON +program + .command('daemon') + .description('Run the local approval server') + .argument('[action]', 'start | stop | status (default: start)') + .option('-b, --background', 'Start the daemon in the background (detached)') + .option('-o, --openui', 'Start in background and open browser') + .action( + async (action: string | undefined, options: { background?: boolean; openui?: boolean }) => { + const cmd = (action ?? 'start').toLowerCase(); + if (cmd === 'stop') return stopDaemon(); + if (cmd === 'status') return daemonStatus(); + if (cmd !== 'start' && action !== undefined) { + console.error(chalk.red(`Unknown daemon action: "${action}". 
Use: start | stop | status`)); + process.exit(1); + } + + if (options.openui) { + if (isDaemonRunning()) { + openBrowserLocal(); + console.log(chalk.green(`🌐 Opened browser: http://${DAEMON_HOST}:${DAEMON_PORT}/`)); + process.exit(0); + } + const child = spawn('node9', ['daemon'], { detached: true, stdio: 'ignore' }); + child.unref(); + for (let i = 0; i < 12; i++) { + await new Promise((r) => setTimeout(r, 250)); + if (isDaemonRunning()) break; + } + openBrowserLocal(); + console.log(chalk.green(`\nπŸ›‘οΈ Node9 daemon started + browser opened`)); + process.exit(0); + } + + if (options.background) { + const child = spawn('node9', ['daemon'], { detached: true, stdio: 'ignore' }); + child.unref(); + console.log(chalk.green(`\nπŸ›‘οΈ Node9 daemon started in background (PID ${child.pid})`)); + process.exit(0); + } + + startDaemon(); + } + ); + +// 6. CHECK (Internal Hook - Upgraded with AI Negotiation Loop) program .command('check') .description('Hook handler β€” evaluates a tool call before execution') .argument('[data]', 'JSON string of the tool call') .action(async (data) => { const processPayload = async (raw: string) => { - const logPath = path.join(os.homedir(), '.node9', 'hook-debug.log'); try { if (!raw || raw.trim() === '') process.exit(0); - // Debug: Log raw input and TTY status - if (!fs.existsSync(path.dirname(logPath))) - fs.mkdirSync(path.dirname(logPath), { recursive: true }); - fs.appendFileSync(logPath, `[${new Date().toISOString()}] STDIN: ${raw}\n`); - fs.appendFileSync(logPath, `[${new Date().toISOString()}] TTY: ${process.stdout.isTTY}\n`); - - const payload = JSON.parse(raw) as { tool_name?: string; tool_input?: unknown }; - const toolName = sanitize(payload.tool_name ?? ''); - const toolInput = payload.tool_input ?? {}; - - const { approved, reason } = await authorizeHeadless(toolName, toolInput); - if (approved) process.exit(0); - - const msg = reason ?? 
`Node9 blocked "${toolName}".`; - - // Ensure block reason is visible in terminal even if Gemini swallows stdout - console.error(chalk.red(`\nπŸ›‘οΈ Node9 Security Block: ${msg}\n`)); - - // Full Claude Code & Gemini compatibility format - process.stdout.write( - JSON.stringify({ - decision: 'block', - reason: msg, - hookSpecificOutput: { - hookEvent_name: 'PreToolUse', - permissionDecision: 'deny', - permissionDecisionReason: msg, - }, - }) + '\n' - ); - process.exit(0); + const payload = JSON.parse(raw) as { + tool_name?: string; + tool_input?: unknown; + name?: string; + args?: unknown; + cwd?: string; + }; + + // Change to the project cwd from the hook payload BEFORE loading config, + // so getConfig() finds the correct node9.config.json for that project. + if (payload.cwd) { + try { + process.chdir(payload.cwd); + } catch { + // ignore if cwd doesn't exist + } + } + + const config = getConfig(); + + // Debug logging β€” controlled by Env Var OR new Settings config + if (process.env.NODE9_DEBUG === '1' || config.settings.enableHookLogDebug) { + const logPath = path.join(os.homedir(), '.node9', 'hook-debug.log'); + if (!fs.existsSync(path.dirname(logPath))) + fs.mkdirSync(path.dirname(logPath), { recursive: true }); + fs.appendFileSync(logPath, `[${new Date().toISOString()}] STDIN: ${raw}\n`); + } + const toolName = sanitize(payload.tool_name ?? payload.name ?? ''); + const toolInput = payload.tool_input ?? payload.args ?? {}; + + const agent = + payload.tool_name !== undefined + ? 'Claude Code' + : payload.name !== undefined + ? 'Gemini CLI' + : 'Terminal'; + const mcpMatch = toolName.match(/^mcp__([^_](?:[^_]|_(?!_))*?)__/i); + const mcpServer = mcpMatch?.[1]; + + // ── THE NEGOTIATION LOOP (TALKING BACK TO THE AI) ─────────────── + // src/cli.ts -> inside the check command action + + const sendBlock = ( + msg: string, + result?: { blockedBy?: string; changeHint?: string; blockedByLabel?: string } + ) => { + // 1. 
Determine the context (User vs Policy) + const blockedByContext = + result?.blockedByLabel || result?.blockedBy || 'Local Security Policy'; + + // 2. Identify if it was a human decision or an automated rule + const isHumanDecision = + blockedByContext.toLowerCase().includes('user') || + blockedByContext.toLowerCase().includes('daemon') || + blockedByContext.toLowerCase().includes('decision'); + + // 3. Print to the human terminal for visibility + console.error(chalk.red(`\nπŸ›‘ Node9 blocked "${toolName}"`)); + console.error(chalk.gray(` Triggered by: ${blockedByContext}`)); + if (result?.changeHint) console.error(chalk.cyan(` To change: ${result.changeHint}`)); + console.error(''); + + // 4. THE NEGOTIATION PROMPT: This is what the LLM actually reads + let aiFeedbackMessage = ''; + + if (isHumanDecision) { + // Voice for User Rejection + aiFeedbackMessage = `NODE9 SECURITY INTERVENTION: The human user specifically REJECTED this action. + REASON: ${msg || 'No specific reason provided by user.'} + + INSTRUCTIONS FOR AI AGENT: + - Do NOT retry this exact command immediately. + - Explain to the user that you understand they blocked the action. + - Ask the user if there is an alternative approach they would prefer, or if they intended to block this action entirely. + - If you believe this action is critical, explain your reasoning to the user and ask them to run 'node9 pause 15m' to allow you to proceed.`; + } else { + // Voice for Policy/Rule Rejection + aiFeedbackMessage = `NODE9 SECURITY INTERVENTION: Action blocked by automated policy [${blockedByContext}]. + REASON: ${msg} + + INSTRUCTIONS FOR AI AGENT: + - This command violates the current security configuration. + - Do NOT attempt to bypass this rule with bash syntax tricks; it will be blocked again. + - Pivot to a non-destructive or read-only alternative. + - Inform the user which security rule was triggered.`; + } + + // 5. 
Send the structured JSON back to the LLM agent + process.stdout.write( + JSON.stringify({ + decision: 'block', + reason: aiFeedbackMessage, // This is the core instruction + hookSpecificOutput: { + hookEventName: 'PreToolUse', + permissionDecision: 'deny', + permissionDecisionReason: aiFeedbackMessage, + }, + }) + '\n' + ); + process.exit(0); + }; + if (!toolName) { + sendBlock('Node9: unrecognised hook payload β€” tool name missing.'); + return; + } + + const meta = { agent, mcpServer }; + + // Snapshot BEFORE the tool runs (PreToolUse) so undo can restore to + // the state prior to this change. Snapshotting after (PostToolUse) + // captures the changed state, making undo a no-op. + const STATE_CHANGING_TOOLS_PRE = [ + 'bash', + 'shell', + 'write_file', + 'edit_file', + 'replace', + 'terminal.execute', + 'str_replace_based_edit_tool', + 'create_file', + ]; + if ( + config.settings.enableUndo && + STATE_CHANGING_TOOLS_PRE.includes(toolName.toLowerCase()) + ) { + await createShadowSnapshot(); + } + + // Pass to Headless authorization + const result = await authorizeHeadless(toolName, toolInput, false, meta); + + if (result.approved) { + if (result.checkedBy) + process.stderr.write(`βœ“ node9 [${result.checkedBy}]: "${toolName}" allowed\n`); + process.exit(0); + } + + // Auto-start daemon if allowed + if ( + result.noApprovalMechanism && + !isDaemonRunning() && + !process.env.NODE9_NO_AUTO_DAEMON && + !process.stdout.isTTY && + config.settings.autoStartDaemon + ) { + console.error(chalk.cyan('\nπŸ›‘οΈ Node9: Starting approval daemon automatically...')); + const daemonReady = await autoStartDaemonAndWait(); + if (daemonReady) { + const retry = await authorizeHeadless(toolName, toolInput, false, meta); + if (retry.approved) { + if (retry.checkedBy) + process.stderr.write(`βœ“ node9 [${retry.checkedBy}]: "${toolName}" allowed\n`); + process.exit(0); + } + // Add the dynamic label so we know if it was Cloud, Config, etc. + sendBlock(retry.reason ?? 
`Node9 blocked "${toolName}".`, { + ...retry, + blockedByLabel: retry.blockedByLabel, + }); + return; + } + } + + // Add the dynamic label to the final block + sendBlock(result.reason ?? `Node9 blocked "${toolName}".`, { + ...result, + blockedByLabel: result.blockedByLabel, + }); } catch (err: unknown) { - const errMsg = err instanceof Error ? err.message : String(err); - fs.appendFileSync(logPath, `[${new Date().toISOString()}] ERROR: ${errMsg}\n`); - process.exit(0); // Fail open on parse error + if (process.env.NODE9_DEBUG === '1') { + const logPath = path.join(os.homedir(), '.node9', 'hook-debug.log'); + const errMsg = err instanceof Error ? err.message : String(err); + fs.appendFileSync(logPath, `[${new Date().toISOString()}] ERROR: ${errMsg}\n`); + } + process.exit(0); // Fail open so we never break Claude on a parse error } }; @@ -203,75 +647,222 @@ program await processPayload(data); } else { let raw = ''; + let processed = false; + const done = async () => { + if (processed) return; + processed = true; + if (!raw.trim()) return process.exit(0); + await processPayload(raw); + }; process.stdin.setEncoding('utf-8'); process.stdin.on('data', (chunk) => (raw += chunk)); - process.stdin.on('end', async () => await processPayload(raw)); - setTimeout(() => { - if (!raw) process.exit(0); - }, 500); + process.stdin.on('end', () => void done()); + setTimeout(() => void done(), 5000); } }); -// 5. LOG (Audit Trail Hook) +// 7. LOG (Audit Trail Hook) program .command('log') .description('PostToolUse hook β€” records executed tool calls') .argument('[data]', 'JSON string of the tool call') .action(async (data) => { - const logPayload = (raw: string) => { + // 1. 
Added 'async' here to allow 'await' (Fixes Error 1308) + const logPayload = async (raw: string) => { try { if (!raw || raw.trim() === '') process.exit(0); - const payload = JSON.parse(raw) as { tool_name?: string; tool_input?: unknown }; + const payload = JSON.parse(raw) as { + tool_name?: string; + name?: string; + tool_input?: unknown; + args?: unknown; + }; + + // Handle both Claude (tool_name) and Gemini (name) + const tool = sanitize(payload.tool_name ?? payload.name ?? 'unknown'); + const rawInput = payload.tool_input ?? payload.args ?? {}; - // Redact secrets from the input before stringifying for the log const entry = { ts: new Date().toISOString(), - tool: sanitize(payload.tool_name ?? 'unknown'), - input: JSON.parse(redactSecrets(JSON.stringify(payload.tool_input || {}))), + tool: tool, + args: JSON.parse(redactSecrets(JSON.stringify(rawInput))), + decision: 'allowed', + source: 'post-hook', }; const logPath = path.join(os.homedir(), '.node9', 'audit.log'); if (!fs.existsSync(path.dirname(logPath))) fs.mkdirSync(path.dirname(logPath), { recursive: true }); fs.appendFileSync(logPath, JSON.stringify(entry) + '\n'); + + const config = getConfig(); + const STATE_CHANGING_TOOLS = [ + 'bash', + 'shell', + 'write_file', + 'edit_file', + 'replace', + 'terminal.execute', + ]; + + if (config.settings.enableUndo && STATE_CHANGING_TOOLS.includes(tool.toLowerCase())) { + await createShadowSnapshot(); + } } catch { - // Ignored + /* ignore */ } process.exit(0); }; if (data) { - logPayload(data); + await logPayload(data); } else { let raw = ''; process.stdin.setEncoding('utf-8'); process.stdin.on('data', (chunk) => (raw += chunk)); - process.stdin.on('end', () => logPayload(raw)); + process.stdin.on('end', () => { + // Use void to fire the async function from the sync event emitter + void logPayload(raw); + }); setTimeout(() => { if (!raw) process.exit(0); }, 500); } }); -// 6. SMART RUNNER +// 8. 
PAUSE +program + .command('pause') + .description('Temporarily disable Node9 protection for a set duration') + .option('-d, --duration ', 'How long to pause (e.g. 15m, 1h, 30s)', '15m') + .action((options: { duration: string }) => { + const ms = parseDuration(options.duration); + if (ms === null) { + console.error( + chalk.red(`\n❌ Invalid duration: "${options.duration}". Use format like 15m, 1h, 30s.\n`) + ); + process.exit(1); + } + pauseNode9(ms, options.duration); + const expiresAt = new Date(Date.now() + ms).toLocaleTimeString(); + console.log(chalk.yellow(`\n⏸ Node9 paused until ${expiresAt}`)); + console.log(chalk.gray(` All tool calls will be allowed without review.`)); + console.log(chalk.gray(` Run "node9 resume" to re-enable early.\n`)); + }); + +// 9. RESUME +program + .command('resume') + .description('Re-enable Node9 protection immediately') + .action(() => { + const { paused } = checkPause(); + if (!paused) { + console.log(chalk.gray('\nNode9 is already active β€” nothing to resume.\n')); + return; + } + resumeNode9(); + console.log(chalk.green('\nβ–Ά Node9 resumed β€” protection is active.\n')); + }); + +// 10. SMART RUNNER +const HOOK_BASED_AGENTS: Record = { + claude: 'claude', + gemini: 'gemini', + cursor: 'cursor', +}; + program .argument('[command...]', 'The agent command to run (e.g., gemini)') .action(async (commandArgs) => { if (commandArgs && commandArgs.length > 0) { + const firstArg = commandArgs[0].toLowerCase(); + + if (HOOK_BASED_AGENTS[firstArg] !== undefined) { + const target = HOOK_BASED_AGENTS[firstArg]; + console.error( + chalk.yellow(`\n⚠️ Node9 proxy mode does not support "${target}" directly.`) + ); + console.error(chalk.white(`\n "${target}" uses its own hook system. 
Use:`)); + console.error( + chalk.green(` node9 addto ${target} `) + chalk.gray('# one-time setup') + ); + console.error(chalk.green(` ${target} `) + chalk.gray('# run normally')); + process.exit(1); + } + const fullCommand = commandArgs.join(' '); + let result = await authorizeHeadless('shell', { command: fullCommand }); - // NEW: Check the command itself against policy before running - // We treat the initial command as a 'shell' tool call - const { approved, reason } = await authorizeHeadless('shell', { command: fullCommand }); - if (!approved) { - console.error(chalk.red(`\n❌ Node9 Blocked: ${reason || 'Dangerous command detected.'}`)); + if ( + result.noApprovalMechanism && + !isDaemonRunning() && + !process.env.NODE9_NO_AUTO_DAEMON && + getConfig().settings.autoStartDaemon + ) { + console.error(chalk.cyan('\nπŸ›‘οΈ Node9: Starting approval daemon automatically...')); + const daemonReady = await autoStartDaemonAndWait(); + if (daemonReady) result = await authorizeHeadless('shell', { command: fullCommand }); + } + + if (result.noApprovalMechanism && process.stdout.isTTY) { + result = await authorizeHeadless('shell', { command: fullCommand }, true); + } + + if (!result.approved) { + console.error( + chalk.red(`\n❌ Node9 Blocked: ${result.reason || 'Dangerous command detected.'}`) + ); process.exit(1); } + console.error(chalk.green('\nβœ… Approved β€” running command...\n')); await runProxy(fullCommand); } else { program.help(); } }); +program + .command('undo') + .description('Revert the project to the state before the last AI action') + .action(async () => { + const hash = getLatestSnapshotHash(); + + if (!hash) { + console.log(chalk.yellow('\nℹ️ No Undo snapshot found for this machine.\n')); + return; + } + + console.log(chalk.magenta.bold('\nβͺ NODE9 UNDO ENGINE')); + console.log(chalk.white(`Target Snapshot: ${chalk.gray(hash.slice(0, 7))}`)); + + const proceed = await confirm({ + message: 'Revert all files to the state before the last AI action?', + 
default: false, + }); + + if (proceed) { + if (applyUndo(hash)) { + console.log(chalk.green('βœ… Project reverted successfully.\n')); + } else { + console.error(chalk.red('❌ Undo failed. Ensure you are in a Git repository.\n')); + } + } + }); + +process.on('unhandledRejection', (reason) => { + const isCheckHook = process.argv[2] === 'check'; + if (isCheckHook) { + if (process.env.NODE9_DEBUG === '1' || getConfig().settings.enableHookLogDebug) { + const logPath = path.join(os.homedir(), '.node9', 'hook-debug.log'); + const msg = reason instanceof Error ? reason.message : String(reason); + fs.appendFileSync(logPath, `[${new Date().toISOString()}] UNHANDLED: ${msg}\n`); + } + process.exit(0); + } else { + console.error('[Node9] Unhandled error:', reason); + process.exit(1); + } +}); + program.parse(); diff --git a/src/core.ts b/src/core.ts index a2a5fbc..532b776 100644 --- a/src/core.ts +++ b/src/core.ts @@ -6,6 +6,123 @@ import path from 'path'; import os from 'os'; import pm from 'picomatch'; import { parse } from 'sh-syntax'; +import { askNativePopup, sendDesktopNotification } from './ui/native'; + +// ── Feature file paths ──────────────────────────────────────────────────────── +const PAUSED_FILE = path.join(os.homedir(), '.node9', 'PAUSED'); +const TRUST_FILE = path.join(os.homedir(), '.node9', 'trust.json'); + +interface PauseState { + expiry: number; + duration: string; +} +interface TrustEntry { + tool: string; + expiry: number; +} +interface TrustFile { + entries: TrustEntry[]; +} + +// ── Global Pause helpers ────────────────────────────────────────────────────── + +export function checkPause(): { paused: boolean; expiresAt?: number; duration?: string } { + try { + if (!fs.existsSync(PAUSED_FILE)) return { paused: false }; + const state = JSON.parse(fs.readFileSync(PAUSED_FILE, 'utf-8')) as PauseState; + if (state.expiry > 0 && Date.now() >= state.expiry) { + try { + fs.unlinkSync(PAUSED_FILE); + } catch {} + return { paused: false }; + } + return { paused: 
true, expiresAt: state.expiry, duration: state.duration }; + } catch { + return { paused: false }; + } +} + +function atomicWriteSync(filePath: string, data: string, options?: fs.WriteFileOptions): void { + const dir = path.dirname(filePath); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + const tmpPath = `${filePath}.${os.hostname()}.${process.pid}.tmp`; + fs.writeFileSync(tmpPath, data, options); + fs.renameSync(tmpPath, filePath); +} + +export function pauseNode9(durationMs: number, durationStr: string): void { + const state: PauseState = { expiry: Date.now() + durationMs, duration: durationStr }; + atomicWriteSync(PAUSED_FILE, JSON.stringify(state, null, 2)); // Upgraded to atomic +} + +export function resumeNode9(): void { + try { + if (fs.existsSync(PAUSED_FILE)) fs.unlinkSync(PAUSED_FILE); + } catch {} +} + +// ── Trust Session helpers ───────────────────────────────────────────────────── + +function getActiveTrustSession(toolName: string): boolean { + try { + if (!fs.existsSync(TRUST_FILE)) return false; + const trust = JSON.parse(fs.readFileSync(TRUST_FILE, 'utf-8')) as TrustFile; + const now = Date.now(); + const active = trust.entries.filter((e) => e.expiry > now); + if (active.length !== trust.entries.length) { + fs.writeFileSync(TRUST_FILE, JSON.stringify({ entries: active }, null, 2)); + } + return active.some((e) => e.tool === toolName || matchesPattern(toolName, e.tool)); + } catch { + return false; + } +} + +export function writeTrustSession(toolName: string, durationMs: number): void { + try { + let trust: TrustFile = { entries: [] }; + + // 1. Try to read existing trust state + try { + if (fs.existsSync(TRUST_FILE)) { + trust = JSON.parse(fs.readFileSync(TRUST_FILE, 'utf-8')) as TrustFile; + } + } catch { + // If the file is corrupt, start with a fresh object + } + + // 2. 
Filter out the specific tool (to overwrite) and remove any expired entries + const now = Date.now(); + trust.entries = trust.entries.filter((e) => e.tool !== toolName && e.expiry > now); + + // 3. Add the new time-boxed entry + trust.entries.push({ tool: toolName, expiry: now + durationMs }); + + // 4. Perform the ATOMIC write + atomicWriteSync(TRUST_FILE, JSON.stringify(trust, null, 2)); + } catch (err) { + // Silent fail: Node9 should never crash an AI agent session due to a file error + if (process.env.NODE9_DEBUG === '1') { + console.error('[Node9 Trust Error]:', err); + } + } +} + +function appendAuditModeEntry(toolName: string, args: unknown): void { + try { + const entry = JSON.stringify({ + ts: new Date().toISOString(), + tool: toolName, + args, + decision: 'would-have-blocked', + source: 'audit-mode', + }); + const logPath = path.join(os.homedir(), '.node9', 'audit.log'); + const dir = path.dirname(logPath); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + fs.appendFileSync(logPath, entry + '\n'); + } catch {} +} // Default Enterprise Posture export const DANGEROUS_WORDS = [ @@ -30,20 +147,13 @@ function tokenize(toolName: string): string[] { .filter(Boolean); } -function containsDangerousWord(toolName: string, dangerousWords: string[]): boolean { - const tokens = tokenize(toolName); - return dangerousWords.some((word) => tokens.includes(word.toLowerCase())); -} - function matchesPattern(text: string, patterns: string[] | string): boolean { const p = Array.isArray(patterns) ? 
patterns : [patterns]; if (p.length === 0) return false; - const isMatch = pm(p, { nocase: true, dot: true }); const target = text.toLowerCase(); const directMatch = isMatch(target); if (directMatch) return true; - const withoutDotSlash = text.replace(/^\.\//, ''); return isMatch(withoutDotSlash) || isMatch(`./${withoutDotSlash}`); } @@ -62,9 +172,7 @@ function extractShellCommand( ): string | null { const patterns = Object.keys(toolInspection); const matchingPattern = patterns.find((p) => matchesPattern(toolName, p)); - if (!matchingPattern) return null; - const fieldPath = toolInspection[matchingPattern]; const value = getNestedValue(args, fieldPath); return typeof value === 'string' ? value : null; @@ -76,10 +184,6 @@ interface AstNode { [key: string]: unknown; } -/** - * Robust Shell Parser - * Combines sh-syntax AST with a reliable fallback for keyword detection. - */ async function analyzeShellCommand( command: string ): Promise<{ actions: string[]; paths: string[]; allTokens: string[] }> { @@ -90,18 +194,15 @@ async function analyzeShellCommand( const addToken = (token: string) => { const lower = token.toLowerCase(); allTokens.push(lower); - // If it's a path like /usr/bin/rm, also add 'rm' if (lower.includes('/')) { const segments = lower.split('/').filter(Boolean); allTokens.push(...segments); } - // If it's a flag like -delete, also add 'delete' if (lower.startsWith('-')) { allTokens.push(lower.replace(/^-+/, '')); } }; - // 1. 
AST Pass (High Fidelity) try { const ast = await parse(command); const walk = (node: AstNode | null) => { @@ -114,10 +215,9 @@ async function analyzeShellCommand( .filter((s: string) => s.length > 0); if (parts.length > 0) { - const action = parts[0]; - actions.push(action.toLowerCase()); - parts.forEach((p) => addToken(p)); - parts.slice(1).forEach((p) => { + actions.push(parts[0].toLowerCase()); + parts.forEach((p: string) => addToken(p)); + parts.slice(1).forEach((p: string) => { if (!p.startsWith('-')) paths.push(p); }); } @@ -138,15 +238,13 @@ async function analyzeShellCommand( }; walk(ast as unknown as AstNode); } catch { - // Fallback logic + // Fallback } - // 2. Semantic Fallback Pass (Ensures no obfuscation bypasses) if (allTokens.length === 0) { const normalized = command.replace(/\\(.)/g, '$1'); const sanitized = normalized.replace(/["'<>]/g, ' '); const segments = sanitized.split(/[|;&]|\$\(|\)|`/); - segments.forEach((segment) => { const tokens = segment.trim().split(/\s+/).filter(Boolean); if (tokens.length > 0) { @@ -161,33 +259,23 @@ async function analyzeShellCommand( } }); } - return { actions, paths, allTokens }; } -/** - * Redactor: Masks common secret patterns (API keys, tokens, auth headers) - */ export function redactSecrets(text: string): string { if (!text) return text; - let redacted = text; - // Pattern 1: Authorization Header (Bearer/Basic) + // Refined Patterns: Only redact when attached to a known label to avoid masking hashes/paths redacted = redacted.replace( /(authorization:\s*(?:bearer|basic)\s+)[a-zA-Z0-9._\-\/\\=]+/gi, '$1********' ); - - // Pattern 2: API Keys, Secrets, Tokens redacted = redacted.replace( /(api[_-]?key|secret|password|token)([:=]\s*['"]?)[a-zA-Z0-9._\-]{8,}/gi, '$1$2********' ); - // Pattern 3: Generic long alphanumeric strings - redacted = redacted.replace(/\b[a-zA-Z0-9]{32,}\b/g, '********'); - return redacted; } @@ -203,8 +291,15 @@ interface PolicyRule { } interface Config { - settings: { mode: string }; + 
settings: { + mode: string; + autoStartDaemon?: boolean; + enableUndo?: boolean; + enableHookLogDebug?: boolean; + approvers: { native: boolean; browser: boolean; cloud: boolean; terminal: boolean }; + }; policy: { + sandboxPaths: string[]; dangerousWords: string[]; ignoredTools: string[]; toolInspection: Record; @@ -214,8 +309,15 @@ interface Config { } const DEFAULT_CONFIG: Config = { - settings: { mode: 'standard' }, + settings: { + mode: 'standard', + autoStartDaemon: true, + enableUndo: false, + enableHookLogDebug: false, + approvers: { native: true, browser: true, cloud: true, terminal: true }, + }, policy: { + sandboxPaths: [], dangerousWords: DANGEROUS_WORDS, ignoredTools: [ 'list_*', @@ -223,33 +325,12 @@ const DEFAULT_CONFIG: Config = { 'read_*', 'describe_*', 'read', - 'write', - 'edit', - 'multiedit', - 'glob', 'grep', 'ls', - 'notebookread', - 'notebookedit', - 'todoread', - 'todowrite', - 'webfetch', - 'websearch', - 'exitplanmode', 'askuserquestion', ], - toolInspection: { - bash: 'command', - run_shell_command: 'command', - shell: 'command', - 'terminal.execute': 'command', - }, - rules: [ - { - action: 'rm', - allowPaths: ['**/node_modules/**', 'dist/**', 'build/**', '.DS_Store'], - }, - ], + toolInspection: { bash: 'command', shell: 'command' }, + rules: [{ action: 'rm', allowPaths: ['**/node_modules/**', 'dist/**', '.DS_Store'] }], }, environments: {}, }; @@ -260,183 +341,857 @@ export function _resetConfigCache(): void { cachedConfig = null; } +/** + * Reads settings from the global config (~/.node9/config.json) only. + * Intentionally does NOT merge project config β€” these are machine-level + * preferences, not project policies. 
+ */ +export function getGlobalSettings(): { + mode: string; + autoStartDaemon: boolean; + slackEnabled: boolean; + enableTrustSessions: boolean; + allowGlobalPause: boolean; +} { + try { + const globalConfigPath = path.join(os.homedir(), '.node9', 'config.json'); + if (fs.existsSync(globalConfigPath)) { + const parsed = JSON.parse(fs.readFileSync(globalConfigPath, 'utf-8')) as Record< + string, + unknown + >; + const settings = (parsed.settings as Record) || {}; + return { + mode: (settings.mode as string) || 'standard', + autoStartDaemon: settings.autoStartDaemon !== false, + slackEnabled: settings.slackEnabled !== false, + enableTrustSessions: settings.enableTrustSessions === true, + allowGlobalPause: settings.allowGlobalPause !== false, + }; + } + } catch {} + return { + mode: 'standard', + autoStartDaemon: true, + slackEnabled: true, + enableTrustSessions: false, + allowGlobalPause: true, + }; +} + +/** + * Returns true when a Slack API key is stored AND Slack is enabled in config. + * Slack is the approval authority when this is true. + */ +export function hasSlack(): boolean { + const creds = getCredentials(); + if (!creds?.apiKey) return false; + return getGlobalSettings().slackEnabled; +} + +/** + * Reads the internal token from the daemon PID file. + * Used by notifyDaemonViewer / resolveViaDaemon so the Slack flow can + * register and clear viewer-mode cards without needing the CSRF token. + */ +function getInternalToken(): string | null { + try { + const pidFile = path.join(os.homedir(), '.node9', 'daemon.pid'); + if (!fs.existsSync(pidFile)) return null; + const data = JSON.parse(fs.readFileSync(pidFile, 'utf-8')) as Record; + process.kill(data.pid as number, 0); // verify alive + return (data.internalToken as string) ?? 
null; + } catch { + return null; + } +} + export async function evaluatePolicy( toolName: string, - args?: unknown -): Promise<'allow' | 'review'> { + args?: unknown, + agent?: string // NEW: Added agent metadata parameter +): Promise<{ decision: 'allow' | 'review'; blockedByLabel?: string }> { const config = getConfig(); - if (matchesPattern(toolName, config.policy.ignoredTools)) return 'allow'; + + // 1. Ignored tools (Fast Path) - Always allow these first + if (matchesPattern(toolName, config.policy.ignoredTools)) return { decision: 'allow' }; + + let allTokens: string[] = []; + let actionTokens: string[] = []; + let pathTokens: string[] = []; + + // 2. Tokenize the input const shellCommand = extractShellCommand(toolName, args, config.policy.toolInspection); if (shellCommand) { - const { actions, paths, allTokens } = await analyzeShellCommand(shellCommand); - for (const action of actions) { - // Check if action itself is a path (e.g., /usr/bin/rm), check the basename too - const basename = action.includes('/') ? 
action.split('/').pop() : action; - const rule = config.policy.rules.find( - (r) => - r.action === action || - matchesPattern(action, r.action) || - (basename && (r.action === basename || matchesPattern(basename, r.action))) - ); + const analyzed = await analyzeShellCommand(shellCommand); + allTokens = analyzed.allTokens; + actionTokens = analyzed.actions; + pathTokens = analyzed.paths; - if (rule) { - if (paths.length > 0) { - const anyBlocked = paths.some((p) => matchesPattern(p, rule.blockPaths || [])); - if (anyBlocked) return 'review'; - const allAllowed = paths.every((p) => matchesPattern(p, rule.allowPaths || [])); - if (allAllowed) return 'allow'; - } - return 'review'; - } + // Inline arbitrary code execution is always a review + const INLINE_EXEC_PATTERN = /^(python3?|bash|sh|zsh|perl|ruby|node|php|lua)\s+(-c|-e|-eval)\s/i; + if (INLINE_EXEC_PATTERN.test(shellCommand.trim())) { + return { decision: 'review', blockedByLabel: 'Node9 Standard (Inline Execution)' }; } - const isDangerous = allTokens.some((token) => - config.policy.dangerousWords.some((word) => token === word.toLowerCase()) + } else { + allTokens = tokenize(toolName); + actionTokens = [toolName]; + } + + // ── 3. CONTEXTUAL RISK DOWNGRADE (PRD Section 3 / Phase 3) ────────────── + // If the human is typing manually, we only block "Nuclear" actions. + const isManual = agent === 'Terminal'; + if (isManual) { + const NUCLEAR_COMMANDS = [ + 'drop', + 'destroy', + 'purge', + 'rmdir', + 'format', + 'truncate', + 'alter', + 'grant', + 'revoke', + 'docker', + ]; + + const hasNuclear = allTokens.some((t) => NUCLEAR_COMMANDS.includes(t.toLowerCase())); + + // If it's manual and NOT nuclear, we auto-allow (bypass standard "dangerous" words like 'rm' or 'delete') + if (!hasNuclear) return { decision: 'allow' }; + + // If it IS nuclear, we fall through to the standard logic so the developer + // gets a "Flagged By: Manual Nuclear Protection" popup. + } + + // ── 4. 
Sandbox Check (Safe Zones) ─────────────────────────────────────── + if (pathTokens.length > 0 && config.policy.sandboxPaths.length > 0) { + const allInSandbox = pathTokens.every((p) => matchesPattern(p, config.policy.sandboxPaths)); + if (allInSandbox) return { decision: 'allow' }; + } + + // ── 5. Rules Evaluation ───────────────────────────────────────────────── + for (const action of actionTokens) { + const rule = config.policy.rules.find( + (r) => r.action === action || matchesPattern(action, r.action) ); - if (isDangerous) return 'review'; - if (config.settings.mode === 'strict') return 'review'; - return 'allow'; + if (rule) { + if (pathTokens.length > 0) { + const anyBlocked = pathTokens.some((p) => matchesPattern(p, rule.blockPaths || [])); + if (anyBlocked) + return { decision: 'review', blockedByLabel: 'Project/Global Config (Rule Block)' }; + const allAllowed = pathTokens.every((p) => matchesPattern(p, rule.allowPaths || [])); + if (allAllowed) return { decision: 'allow' }; + } + return { decision: 'review', blockedByLabel: 'Project/Global Config (Rule Default Block)' }; + } } - const isDangerous = containsDangerousWord(toolName, config.policy.dangerousWords); - if (isDangerous || config.settings.mode === 'strict') { + + // ── 6. Dangerous Words Evaluation ─────────────────────────────────────── + const isDangerous = allTokens.some((token) => + config.policy.dangerousWords.some((word) => { + const w = word.toLowerCase(); + if (token === w) return true; + try { + return new RegExp(`\\b${w}\\b`, 'i').test(token); + } catch { + return false; + } + }) + ); + + if (isDangerous) { + // Use "Project/Global Config" so E2E tests can verify hierarchy overrides + const label = isManual ? 'Manual Nuclear Protection' : 'Project/Global Config (Dangerous Word)'; + return { decision: 'review', blockedByLabel: label }; + } + + // ── 7. 
Strict Mode Fallback ───────────────────────────────────────────── + if (config.settings.mode === 'strict') { const envConfig = getActiveEnvironment(config); - if (envConfig?.requireApproval === false) return 'allow'; - return 'review'; + if (envConfig?.requireApproval === false) return { decision: 'allow' }; + return { decision: 'review', blockedByLabel: 'Global Config (Strict Mode Active)' }; } - return 'allow'; + + return { decision: 'allow' }; +} + +/** Returns true when toolName matches an ignoredTools pattern (fast-path, silent allow). */ +export function isIgnoredTool(toolName: string): boolean { + const config = getConfig(); + return matchesPattern(toolName, config.policy.ignoredTools); +} + +const DAEMON_PORT = 7391; +const DAEMON_HOST = '127.0.0.1'; + +export function isDaemonRunning(): boolean { + try { + const pidFile = path.join(os.homedir(), '.node9', 'daemon.pid'); + if (!fs.existsSync(pidFile)) return false; + const { pid, port } = JSON.parse(fs.readFileSync(pidFile, 'utf-8')); + if (port !== DAEMON_PORT) return false; + process.kill(pid, 0); + return true; + } catch { + return false; + } +} + +export function getPersistentDecision(toolName: string): 'allow' | 'deny' | null { + try { + const file = path.join(os.homedir(), '.node9', 'decisions.json'); + if (!fs.existsSync(file)) return null; + const decisions = JSON.parse(fs.readFileSync(file, 'utf-8')) as Record; + const d = decisions[toolName]; + if (d === 'allow' || d === 'deny') return d; + } catch { + /* ignore */ + } + return null; +} + +async function askDaemon( + toolName: string, + args: unknown, + meta?: { agent?: string; mcpServer?: string }, + signal?: AbortSignal // NEW: Added signal +): Promise<'allow' | 'deny' | 'abandoned'> { + const base = `http://${DAEMON_HOST}:${DAEMON_PORT}`; + + // Custom abort logic for Node 18 compatibility + const checkCtrl = new AbortController(); + const checkTimer = setTimeout(() => checkCtrl.abort(), 5000); + const onAbort = () => checkCtrl.abort(); + if 
(signal) signal.addEventListener('abort', onAbort); + + try { + const checkRes = await fetch(`${base}/check`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ toolName, args, agent: meta?.agent, mcpServer: meta?.mcpServer }), + signal: checkCtrl.signal, + }); + if (!checkRes.ok) throw new Error('Daemon fail'); + const { id } = (await checkRes.json()) as { id: string }; + + const waitCtrl = new AbortController(); + const waitTimer = setTimeout(() => waitCtrl.abort(), 120_000); + const onWaitAbort = () => waitCtrl.abort(); + if (signal) signal.addEventListener('abort', onWaitAbort); + + try { + const waitRes = await fetch(`${base}/wait/${id}`, { signal: waitCtrl.signal }); + if (!waitRes.ok) return 'deny'; + const { decision } = (await waitRes.json()) as { decision: string }; + if (decision === 'allow') return 'allow'; + if (decision === 'abandoned') return 'abandoned'; + return 'deny'; + } finally { + clearTimeout(waitTimer); + if (signal) signal.removeEventListener('abort', onWaitAbort); + } + } finally { + clearTimeout(checkTimer); + if (signal) signal.removeEventListener('abort', onAbort); + } +} + +/** Register a viewer-mode card on the daemon (Slack is the real authority). */ +async function notifyDaemonViewer( + toolName: string, + args: unknown, + meta?: { agent?: string; mcpServer?: string } +): Promise { + const base = `http://${DAEMON_HOST}:${DAEMON_PORT}`; + const res = await fetch(`${base}/check`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + toolName, + args, + slackDelegated: true, + agent: meta?.agent, + mcpServer: meta?.mcpServer, + }), + signal: AbortSignal.timeout(3000), + }); + if (!res.ok) throw new Error('Daemon unreachable'); + const { id } = (await res.json()) as { id: string }; + return id; +} + +/** Clear a viewer-mode card from the daemon once Slack has decided. 
*/ +async function resolveViaDaemon( + id: string, + decision: 'allow' | 'deny', + internalToken: string +): Promise { + const base = `http://${DAEMON_HOST}:${DAEMON_PORT}`; + await fetch(`${base}/resolve/${id}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', 'X-Node9-Internal': internalToken }, + body: JSON.stringify({ decision }), + signal: AbortSignal.timeout(3000), + }); +} + +/** + * Authorization state machine β€” 6 states based on: + * hasSlack() = credentials.json exists AND slackEnabled + * isDaemonRunning = local approval daemon on localhost:7391 + * allowTerminalFallback = caller allows interactive Y/N + * + * State table: + * hasSlack | daemon | result + * -------- | ------ | ------ + * true | yes | Slack authority + daemon viewer card + * true | no | Slack authority only (no browser) + * false | yes | Browser authority + * false | no | noApprovalMechanism (CLI auto-starts daemon if autoStartDaemon=true) + * false | no+TTY | terminal Y/N prompt (when allowTerminalFallback=true) + * false | no+noTTY | block + */ +export interface AuthResult { + approved: boolean; + reason?: string; + noApprovalMechanism?: boolean; + blockedByLabel?: string; + blockedBy?: + | 'team-policy' + | 'persistent-deny' + | 'local-config' + | 'local-decision' + | 'no-approval-mechanism'; + changeHint?: string; + checkedBy?: + | 'cloud' + | 'daemon' + | 'terminal' + | 'local-policy' + | 'persistent' + | 'trust' + | 'paused' + | 'audit'; } export async function authorizeHeadless( toolName: string, - args: unknown -): Promise<{ approved: boolean; reason?: string }> { - const decision = await evaluatePolicy(toolName, args); - if (decision === 'allow') return { approved: true }; + args: unknown, + allowTerminalFallback = false, + meta?: { agent?: string; mcpServer?: string } +): Promise { + if (process.env.NODE9_PAUSED === '1') return { approved: true, checkedBy: 'paused' }; + const pauseState = checkPause(); + if (pauseState.paused) return { approved: true, 
checkedBy: 'paused' }; + const creds = getCredentials(); - if (creds?.apiKey) { - const envConfig = getActiveEnvironment(getConfig()); - const approved = await callNode9SaaS(toolName, args, creds, envConfig?.slackChannel); - return { approved }; + const config = getConfig(); + + // 1. Check if we are in any kind of test environment (Vitest, CI, or E2E) + const isTestEnv = !!( + process.env.VITEST || + process.env.NODE_ENV === 'test' || + process.env.CI || + process.env.NODE9_TESTING === '1' + ); + + // Get the actual config from file/defaults + const approvers = isTestEnv + ? { + native: false, + browser: false, + cloud: config.settings.approvers?.cloud ?? true, + terminal: false, + } + : config.settings.approvers || { native: true, browser: true, cloud: true, terminal: true }; + + // 2. THE TEST SILENCER: If we are in a test environment, hard-disable all physical UIs. + // We leave 'cloud' alone so your SaaS/Cloud tests can still manage it via mock configs! + if (process.env.VITEST || process.env.NODE_ENV === 'test' || process.env.NODE9_TESTING === '1') { + approvers.native = false; + approvers.browser = false; + approvers.terminal = false; } - if (process.stdout.isTTY) { - console.log(chalk.bgRed.white.bold(` πŸ›‘ NODE9 INTERCEPTOR `)); - console.log(`${chalk.bold('Action:')} ${chalk.red(toolName)}`); - const approved = await confirm({ message: 'Authorize?', default: false }); - return { approved }; + const isManual = meta?.agent === 'Terminal'; + + let explainableLabel = 'Local Config'; + + if (config.settings.mode === 'audit') { + if (!isIgnoredTool(toolName)) { + const policyResult = await evaluatePolicy(toolName, args, meta?.agent); + if (policyResult.decision === 'review') { + appendAuditModeEntry(toolName, args); + sendDesktopNotification( + 'Node9 Audit Mode', + `Would have blocked "${toolName}" (${policyResult.blockedByLabel || 'Local Config'}) β€” running in audit mode` + ); + } + } + return { approved: true, checkedBy: 'audit' }; } - return { - 
approved: false, - reason: `Node9 blocked "${toolName}". Run 'node9 login' to enable Slack approvals, or update node9.config.json policy.`, - }; -} -export { getCredentials }; + // Fast Paths (Ignore, Trust, Policy Allow) + if (!isIgnoredTool(toolName)) { + if (getActiveTrustSession(toolName)) return { approved: true, checkedBy: 'trust' }; + const policyResult = await evaluatePolicy(toolName, args, meta?.agent); + if (policyResult.decision === 'allow') return { approved: true, checkedBy: 'local-policy' }; -function getConfig(): Config { - if (cachedConfig) return cachedConfig; - const projectConfig = tryLoadConfig(path.join(process.cwd(), 'node9.config.json')); - if (projectConfig) { - cachedConfig = mergeWithDefaults(projectConfig); - return cachedConfig; + explainableLabel = policyResult.blockedByLabel || 'Local Config'; + + const persistent = getPersistentDecision(toolName); + if (persistent === 'allow') return { approved: true, checkedBy: 'persistent' }; + if (persistent === 'deny') { + return { + approved: false, + reason: `This tool ("${toolName}") is explicitly listed in your 'Always Deny' list.`, + blockedBy: 'persistent-deny', + blockedByLabel: 'Persistent User Rule', + }; + } + } else { + return { approved: true }; } - const globalConfig = tryLoadConfig(path.join(os.homedir(), '.node9', 'config.json')); - if (globalConfig) { - cachedConfig = mergeWithDefaults(globalConfig); - return cachedConfig; + + // ── THE HANDSHAKE (Phase 4.1: Remote Lock Check) ────────────────────────── + let cloudRequestId: string | null = null; + let isRemoteLocked = false; + const cloudEnforced = approvers.cloud && !!creds?.apiKey; + + if (cloudEnforced) { + try { + const envConfig = getActiveEnvironment(getConfig()); + const initResult = await initNode9SaaS(toolName, args, creds!, envConfig?.slackChannel, meta); + + if (!initResult.pending) { + return { + approved: !!initResult.approved, + reason: + initResult.reason || + (initResult.approved ? 
undefined : 'Action rejected by organization policy.'), + checkedBy: initResult.approved ? 'cloud' : undefined, + blockedBy: initResult.approved ? undefined : 'team-policy', + blockedByLabel: 'Organization Policy (SaaS)', + }; + } + + cloudRequestId = initResult.requestId || null; + isRemoteLocked = !!initResult.remoteApprovalOnly; // πŸ”’ THE GOVERNANCE LOCK + explainableLabel = 'Organization Policy (SaaS)'; + } catch (err: unknown) { + const error = err as Error; + const isAuthError = error.message.includes('401') || error.message.includes('403'); + const isNetworkError = + error.message.includes('fetch') || + error.name === 'AbortError' || + error.message.includes('ECONNREFUSED'); + + const reason = isAuthError + ? 'Invalid or missing API key. Run `node9 login` to generate a key (must start with n9_live_).' + : isNetworkError + ? 'Could not reach the Node9 cloud. Check your network or API URL.' + : error.message; + + console.error( + chalk.yellow(`\n⚠️ Node9: Cloud API Handshake failed β€” ${reason}`) + + chalk.dim(`\n Falling back to local rules...\n`) + ); + } + } + + // ── TERMINAL STATUS ───────────────────────────────────────────────────────── + // Print before the race so the message is guaranteed to show regardless of + // which channel wins (cloud message was previously lost when native popup + // resolved first and aborted the race before pollNode9SaaS could print it). + if (cloudEnforced && cloudRequestId) { + console.error( + chalk.yellow('\nπŸ›‘οΈ Node9: Action suspended β€” waiting for Organization approval.') + ); + console.error(chalk.cyan(' Dashboard β†’ ') + chalk.bold('Mission Control > Flows\n')); + } else if (!cloudEnforced) { + const cloudOffReason = !creds?.apiKey + ? 
'no API key β€” run `node9 login` to connect' + : 'privacy mode (cloud disabled)'; + console.error( + chalk.dim(`\nπŸ›‘οΈ Node9: intercepted "${toolName}" β€” cloud off (${cloudOffReason})\n`) + ); } - cachedConfig = DEFAULT_CONFIG; + + // ── THE MULTI-CHANNEL RACE ENGINE ────────────────────────────────────────── + const abortController = new AbortController(); + const { signal } = abortController; + const racePromises: Promise[] = []; + + let viewerId: string | null = null; + const internalToken = getInternalToken(); + + // 🏁 RACER 1: Cloud SaaS Channel (The Poller) + if (cloudEnforced && cloudRequestId) { + racePromises.push( + (async () => { + try { + if (isDaemonRunning() && internalToken) { + viewerId = await notifyDaemonViewer(toolName, args, meta).catch(() => null); + } + const cloudResult = await pollNode9SaaS(cloudRequestId, creds!, signal); + + return { + approved: cloudResult.approved, + reason: cloudResult.approved + ? undefined + : cloudResult.reason || 'Action rejected by organization administrator via Slack.', + checkedBy: cloudResult.approved ? 'cloud' : undefined, + blockedBy: cloudResult.approved ? undefined : 'team-policy', + blockedByLabel: 'Organization Policy (SaaS)', + }; + } catch (err: unknown) { + const error = err as Error; + if (error.name === 'AbortError' || error.message?.includes('Aborted')) throw err; + throw err; + } + })() + ); + } + + // 🏁 RACER 2: Native OS Popup + if (approvers.native && !isManual) { + racePromises.push( + (async () => { + // Pass isRemoteLocked so the popup knows to hide the "Allow" button + const decision = await askNativePopup( + toolName, + args, + meta?.agent, + explainableLabel, + isRemoteLocked, + signal + ); + + if (decision === 'always_allow') { + writeTrustSession(toolName, 3600000); + return { approved: true, checkedBy: 'trust' }; + } + + const isApproved = decision === 'allow'; + return { + approved: isApproved, + reason: isApproved + ? 
undefined + : "The human user clicked 'Block' on the system dialog window.", + checkedBy: isApproved ? 'daemon' : undefined, + blockedBy: isApproved ? undefined : 'local-decision', + blockedByLabel: 'User Decision (Native)', + }; + })() + ); + } + + // 🏁 RACER 3: Browser Dashboard + if (approvers.browser && isDaemonRunning()) { + racePromises.push( + (async () => { + try { + if (!approvers.native && !cloudEnforced) { + console.error( + chalk.yellow('\nπŸ›‘οΈ Node9: Action suspended β€” waiting for browser approval.') + ); + console.error(chalk.cyan(` URL β†’ http://${DAEMON_HOST}:${DAEMON_PORT}/\n`)); + } + + const daemonDecision = await askDaemon(toolName, args, meta, signal); + if (daemonDecision === 'abandoned') throw new Error('Abandoned'); + + const isApproved = daemonDecision === 'allow'; + return { + approved: isApproved, + reason: isApproved + ? undefined + : 'The human user rejected this action via the Node9 Browser Dashboard.', + checkedBy: isApproved ? 'daemon' : undefined, + blockedBy: isApproved ? undefined : 'local-decision', + blockedByLabel: 'User Decision (Browser)', + }; + } catch (err) { + throw err; + } + })() + ); + } + + // 🏁 RACER 4: Terminal Prompt + if (approvers.terminal && allowTerminalFallback && process.stdout.isTTY) { + racePromises.push( + (async () => { + try { + console.log(chalk.bgRed.white.bold(` πŸ›‘ NODE9 INTERCEPTOR `)); + console.log(`${chalk.bold('Action:')} ${chalk.red(toolName)}`); + console.log(`${chalk.bold('Flagged By:')} ${chalk.yellow(explainableLabel)}`); + + if (isRemoteLocked) { + console.log(chalk.yellow(`⚑ LOCKED BY ADMIN POLICY: Waiting for Slack Approval...\n`)); + // If locked, we don't ask [Y/n]. We just keep the promise alive until the SaaS wins and aborts it. 
+ await new Promise((_, reject) => { + signal.addEventListener('abort', () => reject(new Error('Aborted by SaaS'))); + }); + } + + const TIMEOUT_MS = 60_000; + let timer: NodeJS.Timeout; + const result = await new Promise((resolve, reject) => { + timer = setTimeout(() => reject(new Error('Terminal Timeout')), TIMEOUT_MS); + confirm( + { message: `Authorize? (auto-deny in ${TIMEOUT_MS / 1000}s)`, default: false }, + { signal } + ) + .then(resolve) + .catch(reject); + }); + clearTimeout(timer!); + + return { + approved: result, + reason: result + ? undefined + : "The human user typed 'N' in the terminal to reject this action.", + checkedBy: result ? 'terminal' : undefined, + blockedBy: result ? undefined : 'local-decision', + blockedByLabel: 'User Decision (Terminal)', + }; + } catch (err: unknown) { + const error = err as Error; + if ( + error.name === 'AbortError' || + error.message?.includes('Prompt was canceled') || + error.message?.includes('Aborted by SaaS') + ) + throw err; + if (error.message === 'Terminal Timeout') { + return { + approved: false, + reason: 'The terminal prompt timed out without a human response.', + blockedBy: 'local-decision', + }; + } + throw err; + } + })() + ); + } + + // πŸ† RESOLVE THE RACE + if (racePromises.length === 0) { + return { + approved: false, + noApprovalMechanism: true, + reason: + `NODE9 SECURITY INTERVENTION: Action blocked by automated policy [${explainableLabel}].\n` + + `REASON: Action blocked because no approval channels are available. 
(Native/Browser UI is disabled in config, and this terminal is non-interactive).`, + blockedBy: 'no-approval-mechanism', + blockedByLabel: explainableLabel, + }; + } + + const finalResult = await new Promise((resolve) => { + let resolved = false; + let failures = 0; + const total = racePromises.length; + + const finish = (res: AuthResult) => { + if (!resolved) { + resolved = true; + abortController.abort(); // KILL THE LOSERS + + if (viewerId && internalToken) { + resolveViaDaemon(viewerId, res.approved ? 'allow' : 'deny', internalToken).catch( + () => null + ); + } + resolve(res); + } + }; + + for (const p of racePromises) { + p.then(finish).catch((err) => { + if ( + err.name === 'AbortError' || + err.message?.includes('canceled') || + err.message?.includes('Aborted') + ) + return; + // 'Abandoned' means the browser dashboard closed without deciding. + // Don't silently swallow it β€” that would leave the race promise hanging + // forever when the browser racer is the only channel. + if (err.message === 'Abandoned') { + finish({ + approved: false, + reason: 'Browser dashboard closed without making a decision.', + blockedBy: 'local-decision', + blockedByLabel: 'Browser Dashboard (Abandoned)', + }); + return; + } + failures++; + if (failures === total && !resolved) { + finish({ approved: false, reason: 'All approval channels failed or disconnected.' }); + } + }); + } + }); + + return finalResult; +} + +/** + * Returns the names of all saved profiles in ~/.node9/credentials.json. + * Returns [] when the file doesn't exist or uses the legacy flat format. 
+ */ +export function listCredentialProfiles(): string[] { + try { + const credPath = path.join(os.homedir(), '.node9', 'credentials.json'); + if (!fs.existsSync(credPath)) return []; + const creds = JSON.parse(fs.readFileSync(credPath, 'utf-8')) as Record; + if (!creds.apiKey) return Object.keys(creds).filter((k) => typeof creds[k] === 'object'); + } catch {} + return []; +} + +export function getConfig(): Config { + if (cachedConfig) return cachedConfig; + + const globalPath = path.join(os.homedir(), '.node9', 'config.json'); + const projectPath = path.join(process.cwd(), 'node9.config.json'); + + const globalConfig = tryLoadConfig(globalPath); + const projectConfig = tryLoadConfig(projectPath); + + const mergedSettings = { + ...DEFAULT_CONFIG.settings, + approvers: { ...DEFAULT_CONFIG.settings.approvers }, + }; + const mergedPolicy = { + sandboxPaths: [...DEFAULT_CONFIG.policy.sandboxPaths], + dangerousWords: [...DEFAULT_CONFIG.policy.dangerousWords], + ignoredTools: [...DEFAULT_CONFIG.policy.ignoredTools], + toolInspection: { ...DEFAULT_CONFIG.policy.toolInspection }, + rules: [...DEFAULT_CONFIG.policy.rules], + }; + + const applyLayer = (source: Record | null) => { + if (!source) return; + const s = (source.settings || {}) as Partial; + const p = (source.policy || {}) as Partial; + + if (s.mode !== undefined) mergedSettings.mode = s.mode; + if (s.autoStartDaemon !== undefined) mergedSettings.autoStartDaemon = s.autoStartDaemon; + if (s.enableUndo !== undefined) mergedSettings.enableUndo = s.enableUndo; + if (s.enableHookLogDebug !== undefined) + mergedSettings.enableHookLogDebug = s.enableHookLogDebug; + if (s.approvers) mergedSettings.approvers = { ...mergedSettings.approvers, ...s.approvers }; + + if (p.sandboxPaths) mergedPolicy.sandboxPaths = [...p.sandboxPaths]; + if (p.dangerousWords) mergedPolicy.dangerousWords = [...p.dangerousWords]; + if (p.ignoredTools) mergedPolicy.ignoredTools = [...p.ignoredTools]; + + if (p.toolInspection) + 
mergedPolicy.toolInspection = { ...mergedPolicy.toolInspection, ...p.toolInspection }; + if (p.rules) mergedPolicy.rules.push(...p.rules); + }; + + applyLayer(globalConfig); + applyLayer(projectConfig); + + if (process.env.NODE9_MODE) mergedSettings.mode = process.env.NODE9_MODE as string; + + mergedPolicy.sandboxPaths = [...new Set(mergedPolicy.sandboxPaths)]; + mergedPolicy.dangerousWords = [...new Set(mergedPolicy.dangerousWords)]; + mergedPolicy.ignoredTools = [...new Set(mergedPolicy.ignoredTools)]; + + cachedConfig = { + settings: mergedSettings, + policy: mergedPolicy, + environments: {}, + }; + return cachedConfig; } function tryLoadConfig(filePath: string): Record | null { if (!fs.existsSync(filePath)) return null; try { - const config = JSON.parse(fs.readFileSync(filePath, 'utf-8')) as Record; - validateConfig(config, filePath); - return config; + return JSON.parse(fs.readFileSync(filePath, 'utf-8')) as Record; } catch { return null; } } -function validateConfig(config: Record, path: string): void { - const allowedTopLevel = ['version', 'settings', 'policy', 'environments']; - Object.keys(config).forEach((key) => { - if (!allowedTopLevel.includes(key)) - console.warn(chalk.yellow(`⚠️ Node9: Unknown top-level key "${key}" in ${path}`)); - }); - if (config.policy && typeof config.policy === 'object') { - const policy = config.policy as Record; - const allowedPolicy = ['dangerousWords', 'ignoredTools', 'toolInspection', 'rules']; - Object.keys(policy).forEach((key) => { - if (!allowedPolicy.includes(key)) - console.warn(chalk.yellow(`⚠️ Node9: Unknown policy key "${key}" in ${path}`)); - }); - } -} - -function mergeWithDefaults(parsed: Record): Config { - return { - settings: { ...DEFAULT_CONFIG.settings, ...((parsed.settings as object) || {}) }, - policy: { ...DEFAULT_CONFIG.policy, ...((parsed.policy as object) || {}) }, - environments: (parsed.environments as Record) || {}, - }; -} - function getActiveEnvironment(config: Config): EnvironmentConfig | null 
{ const env = process.env.NODE_ENV || 'development'; return config.environments[env] ?? null; } -function getCredentials() { +export function getCredentials() { + const DEFAULT_API_URL = 'https://api.node9.ai/api/v1/intercept'; if (process.env.NODE9_API_KEY) { return { apiKey: process.env.NODE9_API_KEY, - apiUrl: process.env.NODE9_API_URL || 'https://api.node9.ai/api/v1/intercept', + apiUrl: process.env.NODE9_API_URL || DEFAULT_API_URL, }; } try { const credPath = path.join(os.homedir(), '.node9', 'credentials.json'); if (fs.existsSync(credPath)) { - const creds = JSON.parse(fs.readFileSync(credPath, 'utf-8')); - return { - apiKey: creds.apiKey, - apiUrl: creds.apiUrl || 'https://api.node9.ai/api/v1/intercept', - }; + const creds = JSON.parse(fs.readFileSync(credPath, 'utf-8')) as Record; + const profileName = process.env.NODE9_PROFILE || 'default'; + const profile = creds[profileName] as Record | undefined; + + if (profile?.apiKey) { + return { + apiKey: profile.apiKey as string, + apiUrl: (profile.apiUrl as string) || DEFAULT_API_URL, + }; + } + if (creds.apiKey) { + return { + apiKey: creds.apiKey as string, + apiUrl: (creds.apiUrl as string) || DEFAULT_API_URL, + }; + } } } catch {} return null; } export async function authorizeAction(toolName: string, args: unknown): Promise { - if ((await evaluatePolicy(toolName, args)) === 'allow') return true; - const creds = getCredentials(); - const envConfig = getActiveEnvironment(getConfig()); - if (creds && creds.apiKey) { - return await callNode9SaaS(toolName, args, creds, envConfig?.slackChannel); - } - if (process.stdout.isTTY) { - console.log(chalk.bgRed.white.bold(` πŸ›‘ NODE9 INTERCEPTOR `)); - console.log(`${chalk.bold('Action:')} ${chalk.red(toolName)}`); - const argsPreview = JSON.stringify(args, null, 2); - console.log( - `${chalk.bold('Args:')}\n${chalk.gray(argsPreview.length > 500 ? argsPreview.slice(0, 500) + '\n ... 
(truncated)' : argsPreview)}` - ); - return await confirm({ message: 'Authorize?', default: false }); - } - throw new Error( - `[Node9] Blocked dangerous action: ${toolName}. Run 'node9 login' to enable remote approval.` - ); + const result = await authorizeHeadless(toolName, args, true); + return result.approved; } -async function callNode9SaaS( +export interface CloudApprovalResult { + approved: boolean; + reason?: string; + remoteApprovalOnly?: boolean; +} + +/** + * STEP 1: The Handshake. Runs BEFORE the local UI is spawned to check for locks. + */ +async function initNode9SaaS( toolName: string, args: unknown, creds: { apiKey: string; apiUrl: string }, - slackChannel?: string -): Promise { + slackChannel?: string, + meta?: { agent?: string; mcpServer?: string } +): Promise<{ + pending: boolean; + requestId?: string; + approved?: boolean; + reason?: string; + remoteApprovalOnly?: boolean; +}> { + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 10000); + try { - const controller = new AbortController(); - const timeout = setTimeout(() => controller.abort(), 35000); const response = await fetch(creds.apiUrl, { method: 'POST', headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${creds.apiKey}` }, @@ -444,18 +1199,73 @@ async function callNode9SaaS( toolName, args, slackChannel, - context: { hostname: os.hostname(), cwd: process.cwd(), platform: os.platform() }, + context: { + agent: meta?.agent, + mcpServer: meta?.mcpServer, + hostname: os.hostname(), + cwd: process.cwd(), + platform: os.platform(), + }, }), signal: controller.signal, }); + + if (!response.ok) throw new Error(`HTTP ${response.status}`); + + // FIX: Using TypeScript 'as' casting to resolve the unknown type error + return (await response.json()) as { + pending: boolean; + requestId?: string; + approved?: boolean; + reason?: string; + remoteApprovalOnly?: boolean; + }; + } finally { clearTimeout(timeout); - if (!response.ok) throw 
new Error(`API responded with Status ${response.status}`); - const data = (await response.json()) as { approved: boolean; message?: string }; - if (data.approved) return true; - else return false; - } catch (error: unknown) { - const msg = error instanceof Error ? error.message : String(error); - console.error(chalk.red(`❌ Cloud Error: ${msg}`)); - return false; } } + +/** + * STEP 2: The Poller. Runs INSIDE the Race Engine. + */ +async function pollNode9SaaS( + requestId: string, + creds: { apiKey: string; apiUrl: string }, + signal: AbortSignal +): Promise { + const statusUrl = `${creds.apiUrl}/status/${requestId}`; + const POLL_INTERVAL_MS = 1000; + const POLL_DEADLINE = Date.now() + 10 * 60 * 1000; + + while (Date.now() < POLL_DEADLINE) { + if (signal.aborted) throw new Error('Aborted'); + await new Promise((r) => setTimeout(r, POLL_INTERVAL_MS)); + + try { + const pollCtrl = new AbortController(); + const pollTimer = setTimeout(() => pollCtrl.abort(), 5000); + const statusRes = await fetch(statusUrl, { + headers: { Authorization: `Bearer ${creds.apiKey}` }, + signal: pollCtrl.signal, + }); + clearTimeout(pollTimer); + + if (!statusRes.ok) continue; + + // FIX: Using TypeScript 'as' casting to resolve the unknown type error + const { status, reason } = (await statusRes.json()) as { status: string; reason?: string }; + + if (status === 'APPROVED') { + console.error(chalk.green('βœ… Approved via Cloud.\n')); + return { approved: true, reason }; + } + if (status === 'DENIED' || status === 'AUTO_BLOCKED' || status === 'TIMED_OUT') { + console.error(chalk.red('❌ Denied via Cloud.\n')); + return { approved: false, reason }; + } + } catch { + /* transient network error */ + } + } + return { approved: false, reason: 'Cloud approval timed out after 10 minutes.' 
}; +} diff --git a/src/daemon/html.d.ts b/src/daemon/html.d.ts new file mode 100644 index 0000000..448f7d1 --- /dev/null +++ b/src/daemon/html.d.ts @@ -0,0 +1,4 @@ +declare module '*.html' { + const content: string; + export default content; +} diff --git a/src/daemon/index.ts b/src/daemon/index.ts new file mode 100644 index 0000000..50627f6 --- /dev/null +++ b/src/daemon/index.ts @@ -0,0 +1,576 @@ +// src/daemon/index.ts β€” Node9 localhost approval server +import { UI_HTML_TEMPLATE } from './ui'; +import http from 'http'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { spawn } from 'child_process'; +import { randomUUID } from 'crypto'; +import chalk from 'chalk'; +import { getGlobalSettings } from '../core'; + +export const DAEMON_PORT = 7391; +export const DAEMON_HOST = '127.0.0.1'; +const homeDir = os.homedir(); +export const DAEMON_PID_FILE = path.join(homeDir, '.node9', 'daemon.pid'); +export const DECISIONS_FILE = path.join(homeDir, '.node9', 'decisions.json'); +const GLOBAL_CONFIG_FILE = path.join(homeDir, '.node9', 'config.json'); +const CREDENTIALS_FILE = path.join(homeDir, '.node9', 'credentials.json'); + +interface AuditEntry { + ts: string; + tool: string; + args: unknown; + decision: string; + source: string; +} + +export const AUDIT_LOG_FILE = path.join(homeDir, '.node9', 'audit.log'); +const TRUST_FILE = path.join(homeDir, '.node9', 'trust.json'); + +// ── Atomic File Writer (Fixes Task 0.1) ────────────────────────────────── +function atomicWriteSync(filePath: string, data: string, options?: fs.WriteFileOptions): void { + const dir = path.dirname(filePath); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + const tmpPath = `${filePath}.${randomUUID()}.tmp`; + fs.writeFileSync(tmpPath, data, options); + fs.renameSync(tmpPath, filePath); +} + +function writeTrustEntry(toolName: string, durationMs: number): void { + try { + interface TrustFile { + entries: { tool: string; expiry: number }[]; + } + let 
trust: TrustFile = { entries: [] }; + try { + if (fs.existsSync(TRUST_FILE)) + trust = JSON.parse(fs.readFileSync(TRUST_FILE, 'utf-8')) as TrustFile; + } catch {} + trust.entries = trust.entries.filter((e) => e.tool !== toolName && e.expiry > Date.now()); + trust.entries.push({ tool: toolName, expiry: Date.now() + durationMs }); + atomicWriteSync(TRUST_FILE, JSON.stringify(trust, null, 2)); + } catch {} +} + +const TRUST_DURATIONS: Record = { + '30m': 30 * 60_000, + '1h': 60 * 60_000, + '2h': 2 * 60 * 60_000, +}; + +const SECRET_KEY_RE = /password|secret|token|key|apikey|credential|auth/i; + +function redactArgs(value: unknown): unknown { + if (!value || typeof value !== 'object') return value; + if (Array.isArray(value)) return value.map(redactArgs); + const result: Record = {}; + for (const [k, v] of Object.entries(value as Record)) { + result[k] = SECRET_KEY_RE.test(k) ? '[REDACTED]' : redactArgs(v); + } + return result; +} + +function appendAuditLog(data: { toolName: string; args: unknown; decision: string }): void { + try { + const entry: AuditEntry = { + ts: new Date().toISOString(), + tool: data.toolName, + args: redactArgs(data.args), + decision: data.decision, + source: 'daemon', + }; + const dir = path.dirname(AUDIT_LOG_FILE); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + fs.appendFileSync(AUDIT_LOG_FILE, JSON.stringify(entry) + '\n'); + } catch {} +} + +function getAuditHistory(limit = 20): AuditEntry[] { + try { + if (!fs.existsSync(AUDIT_LOG_FILE)) return []; + const lines = fs.readFileSync(AUDIT_LOG_FILE, 'utf-8').trim().split('\n'); + if (lines.length === 1 && lines[0] === '') return []; + return lines + .slice(-limit) + .map((l) => JSON.parse(l)) + .reverse(); + } catch { + return []; + } +} + +const AUTO_DENY_MS = 120_000; + +function getOrgName(): string | null { + try { + if (fs.existsSync(CREDENTIALS_FILE)) { + return 'Node9 Cloud'; + } + } catch {} + return null; +} + +// True when the daemon was launched automatically by 
the hook/smart-runner. +const autoStarted = process.env.NODE9_AUTO_STARTED === '1'; + +function hasStoredSlackKey(): boolean { + return fs.existsSync(CREDENTIALS_FILE); +} + +function writeGlobalSetting(key: string, value: unknown): void { + let config: Record = {}; + try { + if (fs.existsSync(GLOBAL_CONFIG_FILE)) { + config = JSON.parse(fs.readFileSync(GLOBAL_CONFIG_FILE, 'utf-8')) as Record; + } + } catch {} + if (!config.settings || typeof config.settings !== 'object') config.settings = {}; + (config.settings as Record)[key] = value; + atomicWriteSync(GLOBAL_CONFIG_FILE, JSON.stringify(config, null, 2), { mode: 0o600 }); +} + +type Decision = 'allow' | 'deny' | 'abandoned'; + +interface PendingEntry { + id: string; + toolName: string; + args: unknown; + agent?: string; + mcpServer?: string; + timestamp: number; + slackDelegated: boolean; + timer: ReturnType; + waiter: ((d: Decision) => void) | null; + earlyDecision: Decision | null; +} + +const pending = new Map(); +const sseClients = new Set(); +let abandonTimer: ReturnType | null = null; +let daemonServer: http.Server | null = null; +let hadBrowserClient = false; // true once at least one SSE client has connected + +function abandonPending() { + abandonTimer = null; + pending.forEach((entry, id) => { + clearTimeout(entry.timer); + if (entry.waiter) entry.waiter('abandoned'); + else entry.earlyDecision = 'abandoned'; + pending.delete(id); + broadcast('remove', { id }); + }); + + if (autoStarted) { + try { + fs.unlinkSync(DAEMON_PID_FILE); + } catch {} + setTimeout(() => { + daemonServer?.close(); + process.exit(0); + }, 200); + } +} + +function broadcast(event: string, data: unknown) { + const msg = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`; + sseClients.forEach((client) => { + try { + client.write(msg); + } catch { + sseClients.delete(client); + } + }); +} + +function openBrowser(url: string) { + try { + const args = + process.platform === 'darwin' + ? 
['open', url] + : process.platform === 'win32' + ? ['cmd', '/c', 'start', '', url] + : ['xdg-open', url]; + spawn(args[0], args.slice(1), { detached: true, stdio: 'ignore' }).unref(); + } catch {} +} + +function readBody(req: http.IncomingMessage): Promise { + return new Promise((resolve) => { + let body = ''; + req.on('data', (chunk) => (body += chunk)); + req.on('end', () => resolve(body)); + }); +} + +function readPersistentDecisions(): Record { + try { + if (fs.existsSync(DECISIONS_FILE)) { + return JSON.parse(fs.readFileSync(DECISIONS_FILE, 'utf-8')) as Record< + string, + 'allow' | 'deny' + >; + } + } catch {} + return {}; +} + +function writePersistentDecision(toolName: string, decision: 'allow' | 'deny') { + try { + const decisions = readPersistentDecisions(); + decisions[toolName] = decision; + atomicWriteSync(DECISIONS_FILE, JSON.stringify(decisions, null, 2)); + broadcast('decisions', decisions); + } catch {} +} + +export function startDaemon(): void { + const csrfToken = randomUUID(); + const internalToken = randomUUID(); + const UI_HTML = UI_HTML_TEMPLATE.replace('{{CSRF_TOKEN}}', csrfToken); + const validToken = (req: http.IncomingMessage) => req.headers['x-node9-token'] === csrfToken; + + // ── Graceful Idle Timeout (Fixes Task 0.4) ────────────────────────────── + const IDLE_TIMEOUT_MS = 12 * 60 * 60 * 1000; // 12 hours + let idleTimer: NodeJS.Timeout; + function resetIdleTimer() { + if (idleTimer) clearTimeout(idleTimer); + idleTimer = setTimeout(() => { + if (autoStarted) { + try { + fs.unlinkSync(DAEMON_PID_FILE); + } catch {} + } + process.exit(0); + }, IDLE_TIMEOUT_MS); + idleTimer.unref(); // Don't hold the process open just for the timer + } + resetIdleTimer(); // Start the clock + + const server = http.createServer(async (req, res) => { + const { pathname } = new URL(req.url || '/', `http://${req.headers.host}`); + + if (req.method === 'GET' && pathname === '/') { + res.writeHead(200, { 'Content-Type': 'text/html' }); + return 
res.end(UI_HTML); + } + + if (req.method === 'GET' && pathname === '/events') { + res.writeHead(200, { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }); + if (abandonTimer) { + clearTimeout(abandonTimer); + abandonTimer = null; + } + hadBrowserClient = true; + sseClients.add(res); + res.write( + `event: init\ndata: ${JSON.stringify({ + requests: Array.from(pending.values()).map((e) => ({ + id: e.id, + toolName: e.toolName, + args: e.args, + slackDelegated: e.slackDelegated, + timestamp: e.timestamp, + agent: e.agent, + mcpServer: e.mcpServer, + })), + orgName: getOrgName(), + autoDenyMs: AUTO_DENY_MS, + })}\n\n` + ); + res.write(`event: decisions\ndata: ${JSON.stringify(readPersistentDecisions())}\n\n`); + return req.on('close', () => { + sseClients.delete(res); + if (sseClients.size === 0 && pending.size > 0) { + // Give 10s if browser was already open (page reload / brief disconnect), + // 15s on cold-start (browser needs time to open and connect SSE). + // 2s was too short: auto-opened browsers often reconnect SSE mid-load, + // causing a disconnect+reconnect that exceeded the 2s window and + // abandoned the pending request before the user could see it. + abandonTimer = setTimeout(abandonPending, hadBrowserClient ? 10_000 : 15_000); + } + }); + } + + if (req.method === 'POST' && pathname === '/check') { + try { + resetIdleTimer(); // Agent is active, reset the shutdown clock + + const body = await readBody(req); + if (body.length > 65_536) return res.writeHead(413).end(); + const { toolName, args, slackDelegated = false, agent, mcpServer } = JSON.parse(body); + const id = randomUUID(); + const entry: PendingEntry = { + id, + toolName, + args, + agent: typeof agent === 'string' ? agent : undefined, + mcpServer: typeof mcpServer === 'string' ? 
mcpServer : undefined, + slackDelegated: !!slackDelegated, + timestamp: Date.now(), + earlyDecision: null, + waiter: null, + timer: setTimeout(() => { + if (pending.has(id)) { + const e = pending.get(id)!; + appendAuditLog({ + toolName: e.toolName, + args: e.args, + decision: 'auto-deny', + }); + if (e.waiter) e.waiter('deny'); + else e.earlyDecision = 'deny'; + pending.delete(id); + broadcast('remove', { id }); + } + }, AUTO_DENY_MS), + }; + pending.set(id, entry); + broadcast('add', { + id, + toolName, + args, + slackDelegated: entry.slackDelegated, + agent: entry.agent, + mcpServer: entry.mcpServer, + }); + // When auto-started, the CLI already called openBrowserLocal() before + // the request was registered, so the browser is already opening. + // Skip here to avoid opening a duplicate tab. + if (sseClients.size === 0 && !autoStarted) openBrowser(`http://127.0.0.1:${DAEMON_PORT}/`); + res.writeHead(200, { 'Content-Type': 'application/json' }); + return res.end(JSON.stringify({ id })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'GET' && pathname.startsWith('/wait/')) { + const id = pathname.split('/').pop()!; + const entry = pending.get(id); + if (!entry) return res.writeHead(404).end(); + if (entry.earlyDecision) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + return res.end(JSON.stringify({ decision: entry.earlyDecision })); + } + entry.waiter = (d) => { + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ decision: d })); + }; + return; + } + + if (req.method === 'POST' && pathname.startsWith('/decision/')) { + if (!validToken(req)) return res.writeHead(403).end(); + try { + const id = pathname.split('/').pop()!; + const entry = pending.get(id); + if (!entry) return res.writeHead(404).end(); + const { decision, persist, trustDuration } = JSON.parse(await readBody(req)) as { + decision: string; + persist?: boolean; + trustDuration?: string; + }; + + // Trust session + if 
(decision === 'trust' && trustDuration) { + const ms = TRUST_DURATIONS[trustDuration] ?? 60 * 60_000; + writeTrustEntry(entry.toolName, ms); + appendAuditLog({ + toolName: entry.toolName, + args: entry.args, + decision: `trust:${trustDuration}`, + }); + clearTimeout(entry.timer); + if (entry.waiter) entry.waiter('allow'); + else entry.earlyDecision = 'allow'; + pending.delete(id); + broadcast('remove', { id }); + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } + + const resolvedDecision = decision === 'allow' || decision === 'deny' ? decision : 'deny'; + if (persist) writePersistentDecision(entry.toolName, resolvedDecision); + appendAuditLog({ + toolName: entry.toolName, + args: entry.args, + decision: resolvedDecision, + }); + clearTimeout(entry.timer); + if (entry.waiter) entry.waiter(resolvedDecision); + else entry.earlyDecision = resolvedDecision; + pending.delete(id!); + broadcast('remove', { id }); + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'GET' && pathname === '/settings') { + const s = getGlobalSettings(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + return res.end(JSON.stringify({ ...s, autoStarted })); + } + + // ── Updated POST /settings to handle new config schema ───────────────── + if (req.method === 'POST' && pathname === '/settings') { + if (!validToken(req)) return res.writeHead(403).end(); + try { + const body = await readBody(req); + const data = JSON.parse(body); + if (data.autoStartDaemon !== undefined) + writeGlobalSetting('autoStartDaemon', data.autoStartDaemon); + if (data.slackEnabled !== undefined) writeGlobalSetting('slackEnabled', data.slackEnabled); + if (data.enableTrustSessions !== undefined) + writeGlobalSetting('enableTrustSessions', data.enableTrustSessions); + if (data.enableUndo !== undefined) writeGlobalSetting('enableUndo', data.enableUndo); + if (data.enableHookLogDebug !== undefined) + 
writeGlobalSetting('enableHookLogDebug', data.enableHookLogDebug); + if (data.approvers !== undefined) writeGlobalSetting('approvers', data.approvers); + + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'GET' && pathname === '/slack-status') { + const s = getGlobalSettings(); + res.writeHead(200, { 'Content-Type': 'application/json' }); + return res.end(JSON.stringify({ hasKey: hasStoredSlackKey(), enabled: s.slackEnabled })); + } + + if (req.method === 'POST' && pathname === '/slack-key') { + if (!validToken(req)) return res.writeHead(403).end(); + try { + const { apiKey } = JSON.parse(await readBody(req)); + atomicWriteSync( + CREDENTIALS_FILE, + JSON.stringify({ apiKey, apiUrl: 'https://api.node9.ai/api/v1/intercept' }, null, 2), + { mode: 0o600 } + ); + broadcast('slack-status', { hasKey: true, enabled: getGlobalSettings().slackEnabled }); + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'DELETE' && pathname.startsWith('/decisions/')) { + if (!validToken(req)) return res.writeHead(403).end(); + try { + const toolName = decodeURIComponent(pathname.split('/').pop()!); + const decisions = readPersistentDecisions(); + delete decisions[toolName]; + atomicWriteSync(DECISIONS_FILE, JSON.stringify(decisions, null, 2)); + broadcast('decisions', decisions); + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'POST' && pathname.startsWith('/resolve/')) { + const internalAuth = req.headers['x-node9-internal']; + if (internalAuth !== internalToken) return res.writeHead(403).end(); + try { + const id = pathname.split('/').pop()!; + const entry = pending.get(id); + if (!entry) return res.writeHead(404).end(); + const { decision } = JSON.parse(await readBody(req)); + appendAuditLog({ + toolName: 
entry.toolName, + args: entry.args, + decision, + }); + clearTimeout(entry.timer); + if (entry.waiter) entry.waiter(decision); + else entry.earlyDecision = decision; + pending.delete(id); + broadcast('remove', { id }); + res.writeHead(200); + return res.end(JSON.stringify({ ok: true })); + } catch { + res.writeHead(400).end(); + } + } + + if (req.method === 'GET' && pathname === '/audit') { + res.writeHead(200, { 'Content-Type': 'application/json' }); + return res.end(JSON.stringify(getAuditHistory())); + } + + res.writeHead(404).end(); + }); + + daemonServer = server; + + // ── Port Conflict Resolution (Fixes Task 0.2) ─────────────────────────── + server.on('error', (e: NodeJS.ErrnoException) => { + if (e.code === 'EADDRINUSE') { + try { + if (fs.existsSync(DAEMON_PID_FILE)) { + const { pid } = JSON.parse(fs.readFileSync(DAEMON_PID_FILE, 'utf-8')); + process.kill(pid, 0); // Throws if process is dead + // If we reach here, a legitimate daemon is running. Safely exit. + return process.exit(0); + } + } catch { + // Zombie PID detected. Clean up and resurrect server. 
+ try { + fs.unlinkSync(DAEMON_PID_FILE); + } catch {} + server.listen(DAEMON_PORT, DAEMON_HOST); + return; + } + } + console.error(chalk.red('\nπŸ›‘ Node9 Daemon Error:'), e.message); + process.exit(1); + }); + + server.listen(DAEMON_PORT, DAEMON_HOST, () => { + atomicWriteSync( + DAEMON_PID_FILE, + JSON.stringify({ pid: process.pid, port: DAEMON_PORT, internalToken, autoStarted }), + { mode: 0o600 } + ); + console.log(chalk.green(`πŸ›‘οΈ Node9 Guard LIVE: http://127.0.0.1:${DAEMON_PORT}`)); + }); +} + +export function stopDaemon(): void { + if (!fs.existsSync(DAEMON_PID_FILE)) return console.log(chalk.yellow('Not running.')); + try { + const { pid } = JSON.parse(fs.readFileSync(DAEMON_PID_FILE, 'utf-8')); + process.kill(pid, 'SIGTERM'); + console.log(chalk.green('βœ… Stopped.')); + } catch { + console.log(chalk.gray('Cleaned up stale PID file.')); + } finally { + try { + fs.unlinkSync(DAEMON_PID_FILE); + } catch {} + } +} + +export function daemonStatus(): void { + if (!fs.existsSync(DAEMON_PID_FILE)) + return console.log(chalk.yellow('Node9 daemon: not running')); + try { + const { pid } = JSON.parse(fs.readFileSync(DAEMON_PID_FILE, 'utf-8')); + process.kill(pid, 0); + console.log(chalk.green('Node9 daemon: running')); + } catch { + console.log(chalk.yellow('Node9 daemon: not running (stale PID)')); + } +} diff --git a/src/daemon/ui.html b/src/daemon/ui.html new file mode 100644 index 0000000..692b854 --- /dev/null +++ b/src/daemon/ui.html @@ -0,0 +1,961 @@ + + + + + + Node9 Security Guard + + + +
+
+ +

Node9 Guard

+
Waiting...
+
+ +
+
+
+ ⚠️ Auto-start is off β€” daemon started manually. Run "node9 daemon stop" to stop it, or + enable Auto-start in Settings. +
+
Pending Approvals
+
+
+ ✨ + All clear β€” no pending tool calls. +
+
+ + +
+
+ + + + + + diff --git a/src/daemon/ui.ts b/src/daemon/ui.ts new file mode 100644 index 0000000..8c5cda6 --- /dev/null +++ b/src/daemon/ui.ts @@ -0,0 +1,2 @@ +import content from './ui.html'; +export const UI_HTML_TEMPLATE = content; diff --git a/src/index.ts b/src/index.ts index c5ef43f..76aa36e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,3 +1,4 @@ +// src/index.ts import { authorizeAction } from './core'; /** diff --git a/src/setup.ts b/src/setup.ts index 899ed17..8e75e9f 100644 --- a/src/setup.ts +++ b/src/setup.ts @@ -61,6 +61,26 @@ interface GeminiSettings { [key: string]: unknown; } +function printDaemonTip(): void { + console.log( + chalk.cyan('\n πŸ’‘ Node9 will protect you automatically using Native OS popups.') + + chalk.white('\n To view your history or manage persistent rules, run:') + + chalk.green('\n node9 daemon --openui') + ); +} + +/** + * Returns a shell-safe hook command that works regardless of the user's $PATH. + * Hooks run in a restricted shell (no .bashrc / nvm init), so bare "node9" + * is often not found. Using the full node + cli.js paths avoids this. + */ +function fullPathCommand(subcommand: string): string { + if (process.env.NODE9_TESTING === '1') return `node9 ${subcommand}`; + const nodeExec = process.execPath; // e.g. /home/user/.nvm/.../bin/node + const cliScript = process.argv[1]; // e.g. 
/.../dist/cli.js + return `${nodeExec} ${cliScript} ${subcommand}`; +} + function readJson(filePath: string): T | null { try { if (fs.existsSync(filePath)) { @@ -95,26 +115,26 @@ export async function setupClaude(): Promise { if (!settings.hooks) settings.hooks = {}; const hasPreHook = settings.hooks.PreToolUse?.some((m) => - m.hooks.some((h) => h.command?.includes('node9 check')) + m.hooks.some((h) => h.command?.includes('node9 check') || h.command?.includes('cli.js check')) ); if (!hasPreHook) { if (!settings.hooks.PreToolUse) settings.hooks.PreToolUse = []; settings.hooks.PreToolUse.push({ matcher: '.*', - hooks: [{ type: 'command', command: 'node9 check', timeout: 60 }], + hooks: [{ type: 'command', command: fullPathCommand('check'), timeout: 60 }], }); console.log(chalk.green(' βœ… PreToolUse hook added β†’ node9 check')); anythingChanged = true; } const hasPostHook = settings.hooks.PostToolUse?.some((m) => - m.hooks.some((h) => h.command?.includes('node9 log')) + m.hooks.some((h) => h.command?.includes('node9 log') || h.command?.includes('cli.js log')) ); if (!hasPostHook) { if (!settings.hooks.PostToolUse) settings.hooks.PostToolUse = []; settings.hooks.PostToolUse.push({ matcher: '.*', - hooks: [{ type: 'command', command: 'node9 log' }], + hooks: [{ type: 'command', command: fullPathCommand('log'), timeout: 600 }], }); console.log(chalk.green(' βœ… PostToolUse hook added β†’ node9 log')); anythingChanged = true; @@ -126,27 +146,25 @@ export async function setupClaude(): Promise { } // ── Step 2: Modifications β€” show preview and ask ───────────────────────── - const serversToWrap: Array<{ name: string; originalCmd: string }> = []; + const serversToWrap: Array<{ name: string; originalCmd: string; parts: string[] }> = []; for (const [name, server] of Object.entries(servers)) { if (!server.command || server.command === 'node9') continue; - serversToWrap.push({ - name, - originalCmd: [server.command, ...(server.args ?? 
[])].join(' '), - }); + const parts = [server.command, ...(server.args ?? [])]; + serversToWrap.push({ name, originalCmd: parts.join(' '), parts }); } if (serversToWrap.length > 0) { console.log(chalk.bold('The following existing entries will be modified:\n')); console.log(chalk.white(` ${mcpPath}`)); for (const { name, originalCmd } of serversToWrap) { - console.log(chalk.gray(` β€’ ${name}: "npx ${originalCmd}" β†’ node9 proxy "${originalCmd}"`)); + console.log(chalk.gray(` β€’ ${name}: "${originalCmd}" β†’ node9 ${originalCmd}`)); } console.log(''); const proceed = await confirm({ message: 'Wrap these MCP servers?', default: true }); if (proceed) { - for (const { name, originalCmd } of serversToWrap) { - servers[name] = { ...servers[name], command: 'node9', args: ['proxy', originalCmd] }; + for (const { name, parts } of serversToWrap) { + servers[name] = { ...servers[name], command: 'node9', args: parts }; } claudeConfig.mcpServers = servers; writeJson(mcpPath, claudeConfig); @@ -161,12 +179,14 @@ export async function setupClaude(): Promise { // ── Summary ─────────────────────────────────────────────────────────────── if (!anythingChanged && serversToWrap.length === 0) { console.log(chalk.blue('ℹ️ Node9 is already fully configured for Claude Code.')); + printDaemonTip(); return; } if (anythingChanged) { console.log(chalk.green.bold('πŸ›‘οΈ Node9 is now protecting Claude Code!')); console.log(chalk.gray(' Restart Claude Code for changes to take effect.')); + printDaemonTip(); } } @@ -186,7 +206,9 @@ export async function setupGemini(): Promise { const hasBeforeHook = Array.isArray(settings.hooks.BeforeTool) && - settings.hooks.BeforeTool.some((m) => m.hooks.some((h) => h.command?.includes('node9 check'))); + settings.hooks.BeforeTool.some((m) => + m.hooks.some((h) => h.command?.includes('node9 check') || h.command?.includes('cli.js check')) + ); if (!hasBeforeHook) { if (!settings.hooks.BeforeTool) settings.hooks.BeforeTool = []; // If it was an object (old 
format), we re-initialize it as an array @@ -194,7 +216,14 @@ export async function setupGemini(): Promise { settings.hooks.BeforeTool.push({ matcher: '.*', - hooks: [{ name: 'node9-check', type: 'command', command: 'node9 check', timeout: 60000 }], + hooks: [ + { + name: 'node9-check', + type: 'command', + command: fullPathCommand('check'), + timeout: 600000, + }, + ], }); console.log(chalk.green(' βœ… BeforeTool hook added β†’ node9 check')); anythingChanged = true; @@ -202,7 +231,9 @@ export async function setupGemini(): Promise { const hasAfterHook = Array.isArray(settings.hooks.AfterTool) && - settings.hooks.AfterTool.some((m) => m.hooks.some((h) => h.command?.includes('node9 log'))); + settings.hooks.AfterTool.some((m) => + m.hooks.some((h) => h.command?.includes('node9 log') || h.command?.includes('cli.js log')) + ); if (!hasAfterHook) { if (!settings.hooks.AfterTool) settings.hooks.AfterTool = []; // If it was an object (old format), we re-initialize it as an array @@ -210,7 +241,7 @@ export async function setupGemini(): Promise { settings.hooks.AfterTool.push({ matcher: '.*', - hooks: [{ name: 'node9-log', type: 'command', command: 'node9 log' }], + hooks: [{ name: 'node9-log', type: 'command', command: fullPathCommand('log') }], }); console.log(chalk.green(' βœ… AfterTool hook added β†’ node9 log')); anythingChanged = true; @@ -222,27 +253,25 @@ export async function setupGemini(): Promise { } // ── Step 2: Modifications β€” show preview and ask ───────────────────────── - const serversToWrap: Array<{ name: string; originalCmd: string }> = []; + const serversToWrap: Array<{ name: string; originalCmd: string; parts: string[] }> = []; for (const [name, server] of Object.entries(servers)) { if (!server.command || server.command === 'node9') continue; - serversToWrap.push({ - name, - originalCmd: [server.command, ...(server.args ?? [])].join(' '), - }); + const parts = [server.command, ...(server.args ?? 
[])]; + serversToWrap.push({ name, originalCmd: parts.join(' '), parts }); } if (serversToWrap.length > 0) { console.log(chalk.bold('The following existing entries will be modified:\n')); console.log(chalk.white(` ${settingsPath} (mcpServers)`)); for (const { name, originalCmd } of serversToWrap) { - console.log(chalk.gray(` β€’ ${name}: "${originalCmd}" β†’ node9 proxy "${originalCmd}"`)); + console.log(chalk.gray(` β€’ ${name}: "${originalCmd}" β†’ node9 ${originalCmd}`)); } console.log(''); const proceed = await confirm({ message: 'Wrap these MCP servers?', default: true }); if (proceed) { - for (const { name, originalCmd } of serversToWrap) { - servers[name] = { ...servers[name], command: 'node9', args: ['proxy', originalCmd] }; + for (const { name, parts } of serversToWrap) { + servers[name] = { ...servers[name], command: 'node9', args: parts }; } settings.mcpServers = servers; writeJson(settingsPath, settings); @@ -257,12 +286,14 @@ export async function setupGemini(): Promise { // ── Summary ─────────────────────────────────────────────────────────────── if (!anythingChanged && serversToWrap.length === 0) { console.log(chalk.blue('ℹ️ Node9 is already fully configured for Gemini CLI.')); + printDaemonTip(); return; } if (anythingChanged) { console.log(chalk.green.bold('πŸ›‘οΈ Node9 is now protecting Gemini CLI!')); console.log(chalk.gray(' Restart Gemini CLI for changes to take effect.')); + printDaemonTip(); } } @@ -302,21 +333,21 @@ export async function setupCursor(): Promise { if (!hooksFile.hooks) hooksFile.hooks = {}; const hasPreHook = hooksFile.hooks.preToolUse?.some( - (h) => h.command === 'node9' && h.args?.includes('check') + (h) => (h.command === 'node9' && h.args?.includes('check')) || h.command?.includes('cli.js') ); if (!hasPreHook) { if (!hooksFile.hooks.preToolUse) hooksFile.hooks.preToolUse = []; - hooksFile.hooks.preToolUse.push({ command: 'node9', args: ['check'] }); + hooksFile.hooks.preToolUse.push({ command: fullPathCommand('check') 
}); console.log(chalk.green(' βœ… preToolUse hook added β†’ node9 check')); anythingChanged = true; } const hasPostHook = hooksFile.hooks.postToolUse?.some( - (h) => h.command === 'node9' && h.args?.includes('log') + (h) => (h.command === 'node9' && h.args?.includes('log')) || h.command?.includes('cli.js') ); if (!hasPostHook) { if (!hooksFile.hooks.postToolUse) hooksFile.hooks.postToolUse = []; - hooksFile.hooks.postToolUse.push({ command: 'node9', args: ['log'] }); + hooksFile.hooks.postToolUse.push({ command: fullPathCommand('log') }); console.log(chalk.green(' βœ… postToolUse hook added β†’ node9 log')); anythingChanged = true; } @@ -327,27 +358,25 @@ export async function setupCursor(): Promise { } // ── Step 2: Modifications β€” show preview and ask ───────────────────────── - const serversToWrap: Array<{ name: string; originalCmd: string }> = []; + const serversToWrap: Array<{ name: string; originalCmd: string; parts: string[] }> = []; for (const [name, server] of Object.entries(servers)) { if (!server.command || server.command === 'node9') continue; - serversToWrap.push({ - name, - originalCmd: [server.command, ...(server.args ?? [])].join(' '), - }); + const parts = [server.command, ...(server.args ?? 
[])]; + serversToWrap.push({ name, originalCmd: parts.join(' '), parts }); } if (serversToWrap.length > 0) { console.log(chalk.bold('The following existing entries will be modified:\n')); console.log(chalk.white(` ${mcpPath}`)); for (const { name, originalCmd } of serversToWrap) { - console.log(chalk.gray(` β€’ ${name}: "${originalCmd}" β†’ node9 proxy "${originalCmd}"`)); + console.log(chalk.gray(` β€’ ${name}: "${originalCmd}" β†’ node9 ${originalCmd}`)); } console.log(''); const proceed = await confirm({ message: 'Wrap these MCP servers?', default: true }); if (proceed) { - for (const { name, originalCmd } of serversToWrap) { - servers[name] = { ...servers[name], command: 'node9', args: ['proxy', originalCmd] }; + for (const { name, parts } of serversToWrap) { + servers[name] = { ...servers[name], command: 'node9', args: parts }; } mcpConfig.mcpServers = servers; writeJson(mcpPath, mcpConfig); @@ -362,11 +391,13 @@ export async function setupCursor(): Promise { // ── Summary ─────────────────────────────────────────────────────────────── if (!anythingChanged && serversToWrap.length === 0) { console.log(chalk.blue('ℹ️ Node9 is already fully configured for Cursor.')); + printDaemonTip(); return; } if (anythingChanged) { console.log(chalk.green.bold('πŸ›‘οΈ Node9 is now protecting Cursor!')); console.log(chalk.gray(' Restart Cursor for changes to take effect.')); + printDaemonTip(); } } diff --git a/src/ui/native.ts b/src/ui/native.ts new file mode 100644 index 0000000..eb3c478 --- /dev/null +++ b/src/ui/native.ts @@ -0,0 +1,241 @@ +// src/ui/native.ts +import { spawn } from 'child_process'; + +const isTestEnv = () => { + return ( + process.env.NODE_ENV === 'test' || + process.env.VITEST === 'true' || + !!process.env.VITEST || + process.env.CI === 'true' || + !!process.env.CI || + process.env.NODE9_TESTING === '1' + ); +}; + +/** + * Sends a non-blocking, one-way system notification. 
+ */ +export function sendDesktopNotification(title: string, body: string): void { + if (isTestEnv()) return; + + try { + const safeTitle = title.replace(/"/g, '\\"'); + const safeBody = body.replace(/"/g, '\\"'); + + if (process.platform === 'darwin') { + const script = `display notification "${safeBody}" with title "${safeTitle}"`; + spawn('osascript', ['-e', script], { detached: true, stdio: 'ignore' }).unref(); + } else if (process.platform === 'linux') { + spawn('notify-send', [safeTitle, safeBody, '--icon=dialog-warning'], { + detached: true, + stdio: 'ignore', + }).unref(); + } + } catch { + /* Silent fail for notifications */ + } +} + +/** + * Formats tool arguments into readable key: value lines. + * Each value is truncated to avoid overwhelming the popup. + */ +function formatArgs(args: unknown): string { + if (args === null || args === undefined) return '(none)'; + + if (typeof args !== 'object' || Array.isArray(args)) { + const str = typeof args === 'string' ? args : JSON.stringify(args); + return str.length > 200 ? str.slice(0, 200) + '…' : str; + } + + const entries = Object.entries(args as Record).filter( + ([, v]) => v !== null && v !== undefined && v !== '' + ); + + if (entries.length === 0) return '(none)'; + + const MAX_FIELDS = 5; + const MAX_VALUE_LEN = 120; + + const lines = entries.slice(0, MAX_FIELDS).map(([key, val]) => { + const str = typeof val === 'string' ? val : JSON.stringify(val); + const truncated = str.length > MAX_VALUE_LEN ? str.slice(0, MAX_VALUE_LEN) + '…' : str; + return ` ${key}: ${truncated}`; + }); + + if (entries.length > MAX_FIELDS) { + lines.push(` … and ${entries.length - MAX_FIELDS} more field(s)`); + } + + return lines.join('\n'); +} + +/** + * Triggers an asynchronous, two-way OS dialog box. 
+ * Returns: 'allow' | 'deny' | 'always_allow' + */ +export async function askNativePopup( + toolName: string, + args: unknown, + agent?: string, + explainableLabel?: string, + locked: boolean = false, // Phase 4.1: The Remote Lock + signal?: AbortSignal // Phase 4.2: The Auto-Close Trigger +): Promise<'allow' | 'deny' | 'always_allow'> { + if (isTestEnv()) return 'deny'; + if (process.env.NODE9_DEBUG === '1' || process.env.VITEST) { + console.log(`[DEBUG Native] askNativePopup called for: ${toolName}`); + console.log(`[DEBUG Native] isTestEnv check:`, { + VITEST: process.env.VITEST, + NODE_ENV: process.env.NODE_ENV, + CI: process.env.CI, + isTest: isTestEnv(), + }); + } + + const title = locked + ? `⚑ Node9 β€” Locked by Admin Policy` + : `πŸ›‘οΈ Node9 β€” Action Requires Approval`; + + // Build a structured, scannable message + let message = ''; + + if (locked) { + message += `⚑ Awaiting remote approval via Slack. Local override is disabled.\n`; + message += `─────────────────────────────────\n`; + } + + message += `Tool: ${toolName}\n`; + message += `Agent: ${agent || 'AI Agent'}\n`; + if (explainableLabel) { + message += `Reason: ${explainableLabel}\n`; + } + message += `\nArguments:\n${formatArgs(args)}`; + + if (!locked) { + message += `\n\nEnter = Allow | Click "Block" to deny`; + } + + // Escape for shell/applescript safety + const safeMessage = message.replace(/\\/g, '\\\\').replace(/"/g, '\\"').replace(/`/g, "'"); + const safeTitle = title.replace(/"/g, '\\"'); + + return new Promise((resolve) => { + let childProcess: ReturnType | null = null; + + // The Auto-Close Logic (Fires when Cloud wins the race) + const onAbort = () => { + if (childProcess) { + try { + process.kill(childProcess.pid!, 'SIGKILL'); + } catch {} + } + resolve('deny'); + }; + + if (signal) { + if (signal.aborted) return resolve('deny'); + signal.addEventListener('abort', onAbort); + } + + const cleanup = () => { + if (signal) signal.removeEventListener('abort', onAbort); + }; + + try 
{ + // --- macOS --- + if (process.platform === 'darwin') { + // Default button is "Allow" β€” Enter = permit, Escape = Block + const buttons = locked + ? `buttons {"Waiting…"} default button "Waiting…"` + : `buttons {"Block", "Always Allow", "Allow"} default button "Allow" cancel button "Block"`; + + const script = ` + tell application "System Events" + activate + display dialog "${safeMessage}" with title "${safeTitle}" ${buttons} + end tell`; + + childProcess = spawn('osascript', ['-e', script]); + let output = ''; + childProcess.stdout?.on('data', (d) => (output += d.toString())); + + childProcess.on('close', (code) => { + cleanup(); + if (locked) return resolve('deny'); + if (code === 0) { + if (output.includes('Always Allow')) return resolve('always_allow'); + if (output.includes('Allow')) return resolve('allow'); + } + resolve('deny'); + }); + } + + // --- Linux --- + else if (process.platform === 'linux') { + const argsList = locked + ? [ + '--info', + '--title', + title, + '--text', + safeMessage, + '--ok-label', + 'Waiting for Slack…', + '--timeout', + '300', + ] + : [ + '--question', + '--title', + title, + '--text', + safeMessage, + '--ok-label', + 'Allow', + '--cancel-label', + 'Block', + '--extra-button', + 'Always Allow', + '--timeout', + '300', + ]; + + childProcess = spawn('zenity', argsList); + let output = ''; + childProcess.stdout?.on('data', (d) => (output += d.toString())); + + childProcess.on('close', (code) => { + cleanup(); + if (locked) return resolve('deny'); + // zenity: --ok-label (Allow) = exit 0, --cancel-label (Block) = exit 1, extra-button = stdout + if (output.trim() === 'Always Allow') return resolve('always_allow'); + if (code === 0) return resolve('allow'); // clicked "Allow" (ok-label, Enter) + resolve('deny'); // clicked "Block" or timed out + }); + } + + // --- Windows --- + else if (process.platform === 'win32') { + const buttonType = locked ? 
'OK' : 'YesNo'; + const ps = ` + Add-Type -AssemblyName PresentationFramework; + $res = [System.Windows.MessageBox]::Show("${safeMessage}", "${safeTitle}", "${buttonType}", "Warning", "Button2", "DefaultDesktopOnly"); + if ($res -eq "Yes") { exit 0 } else { exit 1 }`; + + childProcess = spawn('powershell', ['-Command', ps]); + childProcess.on('close', (code) => { + cleanup(); + if (locked) return resolve('deny'); + resolve(code === 0 ? 'allow' : 'deny'); + }); + } else { + cleanup(); + resolve('deny'); + } + } catch { + cleanup(); + resolve('deny'); + } + }); +} diff --git a/src/undo.ts b/src/undo.ts new file mode 100644 index 0000000..e233a13 --- /dev/null +++ b/src/undo.ts @@ -0,0 +1,91 @@ +// src/undo.ts +import { spawnSync } from 'child_process'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; + +const UNDO_LATEST_PATH = path.join(os.homedir(), '.node9', 'undo_latest.txt'); + +/** + * Creates a "Shadow Snapshot" of the current repository state. + * Uses a temporary Git index to ensure we don't interfere with the + * user's own staged changes. + */ +export async function createShadowSnapshot(): Promise { + try { + const cwd = process.cwd(); + if (!fs.existsSync(path.join(cwd, '.git'))) return null; + + // Use a unique temp index file so we don't touch the user's staging area + const tempIndex = path.join(cwd, '.git', `node9_index_${Date.now()}`); + const env = { ...process.env, GIT_INDEX_FILE: tempIndex }; + + // 1. Stage all changes into the TEMP index + spawnSync('git', ['add', '-A'], { env }); + + // 2. Create a tree object from the TEMP index + const treeRes = spawnSync('git', ['write-tree'], { env }); + const treeHash = treeRes.stdout.toString().trim(); + + // Clean up the temp index file immediately + if (fs.existsSync(tempIndex)) fs.unlinkSync(tempIndex); + + if (!treeHash || treeRes.status !== 0) return null; + + // 3. 
Create a dangling commit (not attached to any branch) + const commitRes = spawnSync('git', [ + 'commit-tree', + treeHash, + '-m', + `Node9 AI Snapshot: ${new Date().toISOString()}`, + ]); + const commitHash = commitRes.stdout.toString().trim(); + + if (commitHash && commitRes.status === 0) { + const dir = path.dirname(UNDO_LATEST_PATH); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + fs.writeFileSync(UNDO_LATEST_PATH, commitHash); + return commitHash; + } + } catch (err) { + if (process.env.NODE9_DEBUG === '1') { + console.error('[Node9 Undo Engine Error]:', err); + } + } + return null; +} + +/** + * Reverts the current directory to a specific Git commit hash. + * Also removes files that were created after the snapshot (git restore + * alone does not delete files that aren't in the source tree). + */ +export function applyUndo(hash: string): boolean { + try { + // 1. Restore all tracked files to snapshot state + const restore = spawnSync('git', ['restore', '--source', hash, '--staged', '--worktree', '.']); + if (restore.status !== 0) return false; + + // 2. Find files in the snapshot tree + const lsTree = spawnSync('git', ['ls-tree', '-r', '--name-only', hash]); + const snapshotFiles = new Set(lsTree.stdout.toString().trim().split('\n').filter(Boolean)); + + // 3. 
Find currently tracked files that weren't in the snapshot β†’ delete them + const lsCurrent = spawnSync('git', ['ls-files']); + const currentFiles = lsCurrent.stdout.toString().trim().split('\n').filter(Boolean); + for (const file of currentFiles) { + if (!snapshotFiles.has(file) && fs.existsSync(file)) { + fs.unlinkSync(file); + } + } + + return true; + } catch { + return false; + } +} + +export function getLatestSnapshotHash(): string | null { + if (!fs.existsSync(UNDO_LATEST_PATH)) return null; + return fs.readFileSync(UNDO_LATEST_PATH, 'utf-8').trim(); +} diff --git a/tsup.config.ts b/tsup.config.ts index 83feb95..567e5ae 100644 --- a/tsup.config.ts +++ b/tsup.config.ts @@ -9,4 +9,7 @@ export default defineConfig({ dts: true, clean: true, splitting: false, + esbuildOptions(options) { + options.loader = { ...options.loader, '.html': 'text' }; + }, }); diff --git a/vitest.config.mts b/vitest.config.mts index 0f7a6b8..6e64f36 100644 --- a/vitest.config.mts +++ b/vitest.config.mts @@ -3,6 +3,7 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { environment: 'node', + env: { NODE9_TESTING: '1' }, clearMocks: true, }, });