diff --git a/Cargo.lock b/Cargo.lock
index 05bf0890..d1e5015b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -447,7 +447,7 @@ checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32"
[[package]]
name = "cli-sub-agent"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -604,7 +604,7 @@ dependencies = [
[[package]]
name = "csa-acp"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"agent-client-protocol",
"anyhow",
@@ -623,7 +623,7 @@ dependencies = [
[[package]]
name = "csa-config"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -639,7 +639,7 @@ dependencies = [
[[package]]
name = "csa-core"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"agent-teams",
"chrono",
@@ -653,7 +653,7 @@ dependencies = [
[[package]]
name = "csa-executor"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"agent-teams",
"anyhow",
@@ -677,7 +677,7 @@ dependencies = [
[[package]]
name = "csa-hooks"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -692,7 +692,7 @@ dependencies = [
[[package]]
name = "csa-lock"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -704,7 +704,7 @@ dependencies = [
[[package]]
name = "csa-mcp-hub"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"axum",
@@ -726,7 +726,7 @@ dependencies = [
[[package]]
name = "csa-memory"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"async-trait",
@@ -744,7 +744,7 @@ dependencies = [
[[package]]
name = "csa-process"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"csa-core",
@@ -761,7 +761,7 @@ dependencies = [
[[package]]
name = "csa-resource"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"csa-core",
@@ -776,7 +776,7 @@ dependencies = [
[[package]]
name = "csa-scheduler"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -794,7 +794,7 @@ dependencies = [
[[package]]
name = "csa-session"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -815,7 +815,7 @@ dependencies = [
[[package]]
name = "csa-todo"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"chrono",
@@ -3903,7 +3903,7 @@ dependencies = [
[[package]]
name = "weave"
-version = "0.1.57"
+version = "0.1.58"
dependencies = [
"anyhow",
"clap",
diff --git a/Cargo.toml b/Cargo.toml
index df27a0fc..73844560 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,7 +3,7 @@ members = ["crates/*"]
resolver = "2"
[workspace.package]
-version = "0.1.57"
+version = "0.1.58"
edition = "2024"
rust-version = "1.85"
license = "Apache-2.0"
diff --git a/crates/cli-sub-agent/src/pipeline.rs b/crates/cli-sub-agent/src/pipeline.rs
index 5b53a26d..e1ca293c 100644
--- a/crates/cli-sub-agent/src/pipeline.rs
+++ b/crates/cli-sub-agent/src/pipeline.rs
@@ -25,30 +25,12 @@ use crate::memory_capture;
use crate::pipeline_project_key::resolve_memory_project_key;
use crate::run_helpers::truncate_prompt;
use crate::session_guard::{SessionCleanupGuard, write_pre_exec_error_result};
+#[path = "pipeline_prompt_guard.rs"]
+mod prompt_guard;
+use prompt_guard::emit_prompt_guard_to_caller;
pub(crate) const DEFAULT_IDLE_TIMEOUT_SECONDS: u64 = 120;
pub(crate) const DEFAULT_LIVENESS_DEAD_SECONDS: u64 = csa_process::DEFAULT_LIVENESS_DEAD_SECS;
-const PROMPT_GUARD_CALLER_INJECTION_ENV: &str = "CSA_EMIT_CALLER_GUARD_INJECTION";
-
-fn should_emit_prompt_guard_to_caller() -> bool {
- match std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV) {
- Ok(raw) => {
- let normalized = raw.trim().to_ascii_lowercase();
- !matches!(normalized.as_str(), "0" | "false" | "off" | "no")
- }
- Err(_) => true,
- }
-}
-
-fn emit_prompt_guard_to_caller(guard_block: &str, guard_count: usize) {
- if !should_emit_prompt_guard_to_caller() || guard_block.trim().is_empty() {
- return;
- }
- eprintln!("[csa-hook] reverse prompt injection for caller (guards={guard_count})");
- eprintln!("");
- eprintln!("{guard_block}");
- eprintln!("");
-}
pub(crate) fn resolve_idle_timeout_seconds(
config: Option<&ProjectConfig>,
@@ -808,3 +790,7 @@ mod tests;
#[cfg(test)]
#[path = "pipeline_tests_thinking.rs"]
mod thinking_tests;
+
+#[cfg(test)]
+#[path = "pipeline_tests_prompt_guard.rs"]
+mod prompt_guard_tests;
diff --git a/crates/cli-sub-agent/src/pipeline_prompt_guard.rs b/crates/cli-sub-agent/src/pipeline_prompt_guard.rs
new file mode 100644
index 00000000..8cb740b1
--- /dev/null
+++ b/crates/cli-sub-agent/src/pipeline_prompt_guard.rs
@@ -0,0 +1,21 @@
+pub(super) const PROMPT_GUARD_CALLER_INJECTION_ENV: &str = "CSA_EMIT_CALLER_GUARD_INJECTION";
+
+pub(super) fn should_emit_prompt_guard_to_caller() -> bool {
+ match std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV) {
+ Ok(raw) => {
+ let normalized = raw.trim().to_ascii_lowercase();
+ !matches!(normalized.as_str(), "0" | "false" | "off" | "no")
+ }
+ Err(_) => true,
+ }
+}
+
+pub(super) fn emit_prompt_guard_to_caller(guard_block: &str, guard_count: usize) {
+ if !should_emit_prompt_guard_to_caller() || guard_block.trim().is_empty() {
+ return;
+ }
+ eprintln!("[csa-hook] reverse prompt injection for caller (guards={guard_count})");
+ eprintln!("");
+ eprintln!("{guard_block}");
+ eprintln!("");
+}
diff --git a/crates/cli-sub-agent/src/pipeline_tests.rs b/crates/cli-sub-agent/src/pipeline_tests.rs
index 68c3ca86..ac0ed562 100644
--- a/crates/cli-sub-agent/src/pipeline_tests.rs
+++ b/crates/cli-sub-agent/src/pipeline_tests.rs
@@ -6,19 +6,6 @@ use csa_config::{ProjectMeta, ResourcesConfig};
use csa_hooks::{FailPolicy, HookConfig, HookEvent, HooksConfig, Waiver};
use std::collections::HashMap;
use std::fs;
-use std::sync::{LazyLock, Mutex};
-
-static PROMPT_GUARD_ENV_LOCK: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
-
-fn restore_env_var(key: &str, original: Option<String>) {
- // SAFETY: test-scoped env mutation guarded by a process-wide mutex.
- unsafe {
- match original {
- Some(value) => std::env::set_var(key, value),
- None => std::env::remove_var(key),
- }
- }
-}
#[test]
fn determine_project_root_none_returns_cwd() {
@@ -255,36 +242,6 @@ fn resolve_liveness_dead_seconds_uses_config_then_default() {
);
}
-#[test]
-fn prompt_guard_caller_injection_defaults_to_enabled() {
- let _env_lock = PROMPT_GUARD_ENV_LOCK
- .lock()
- .expect("prompt guard env lock poisoned");
- let original = std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV).ok();
- // SAFETY: test-scoped env mutation, restored immediately.
- unsafe { std::env::remove_var(PROMPT_GUARD_CALLER_INJECTION_ENV) };
- let enabled = should_emit_prompt_guard_to_caller();
- restore_env_var(PROMPT_GUARD_CALLER_INJECTION_ENV, original);
- assert!(enabled);
-}
-
-#[test]
-fn prompt_guard_caller_injection_honors_disable_values() {
- let _env_lock = PROMPT_GUARD_ENV_LOCK
- .lock()
- .expect("prompt guard env lock poisoned");
- let original = std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV).ok();
- for value in ["0", "false", "off", "no", "FALSE"] {
- // SAFETY: test-scoped env mutation, restored immediately.
- unsafe { std::env::set_var(PROMPT_GUARD_CALLER_INJECTION_ENV, value) };
- assert!(
- !should_emit_prompt_guard_to_caller(),
- "expected value '{value}' to disable caller injection"
- );
- }
- restore_env_var(PROMPT_GUARD_CALLER_INJECTION_ENV, original);
-}
-
fn make_hooks_config(
event: HookEvent,
command: &str,
diff --git a/crates/cli-sub-agent/src/pipeline_tests_prompt_guard.rs b/crates/cli-sub-agent/src/pipeline_tests_prompt_guard.rs
new file mode 100644
index 00000000..e624dd9a
--- /dev/null
+++ b/crates/cli-sub-agent/src/pipeline_tests_prompt_guard.rs
@@ -0,0 +1,44 @@
+use super::prompt_guard::{PROMPT_GUARD_CALLER_INJECTION_ENV, should_emit_prompt_guard_to_caller};
+use std::sync::{LazyLock, Mutex};
+
+static PROMPT_GUARD_ENV_LOCK: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
+
+fn restore_env_var(key: &str, original: Option<String>) {
+ // SAFETY: test-scoped env mutation guarded by a process-wide mutex.
+ unsafe {
+ match original {
+ Some(value) => std::env::set_var(key, value),
+ None => std::env::remove_var(key),
+ }
+ }
+}
+
+#[test]
+fn prompt_guard_caller_injection_defaults_to_enabled() {
+ let _env_lock = PROMPT_GUARD_ENV_LOCK
+ .lock()
+ .expect("prompt guard env lock poisoned");
+ let original = std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV).ok();
+ // SAFETY: test-scoped env mutation, restored immediately.
+ unsafe { std::env::remove_var(PROMPT_GUARD_CALLER_INJECTION_ENV) };
+ let enabled = should_emit_prompt_guard_to_caller();
+ restore_env_var(PROMPT_GUARD_CALLER_INJECTION_ENV, original);
+ assert!(enabled);
+}
+
+#[test]
+fn prompt_guard_caller_injection_honors_disable_values() {
+ let _env_lock = PROMPT_GUARD_ENV_LOCK
+ .lock()
+ .expect("prompt guard env lock poisoned");
+ let original = std::env::var(PROMPT_GUARD_CALLER_INJECTION_ENV).ok();
+ for value in ["0", "false", "off", "no", "FALSE"] {
+ // SAFETY: test-scoped env mutation, restored immediately.
+ unsafe { std::env::set_var(PROMPT_GUARD_CALLER_INJECTION_ENV, value) };
+ assert!(
+ !should_emit_prompt_guard_to_caller(),
+ "expected value '{value}' to disable caller injection"
+ );
+ }
+ restore_env_var(PROMPT_GUARD_CALLER_INJECTION_ENV, original);
+}
diff --git a/crates/cli-sub-agent/src/plan_condition.rs b/crates/cli-sub-agent/src/plan_condition.rs
index 27f72630..597e670b 100644
--- a/crates/cli-sub-agent/src/plan_condition.rs
+++ b/crates/cli-sub-agent/src/plan_condition.rs
@@ -243,7 +243,7 @@ mod tests {
#[test]
fn nested_not_and_and() {
- // Pattern from dev-to-merge: (${BOT_HAS_ISSUES}) && (!(${COMMENT_IS_FALSE_POSITIVE}))
+ // Pattern from dev2merge/dev-to-merge: (${BOT_HAS_ISSUES}) && (!(${COMMENT_IS_FALSE_POSITIVE}))
let mut vars = HashMap::new();
vars.insert("BOT_HAS_ISSUES".into(), "yes".into());
// COMMENT_IS_FALSE_POSITIVE not set → !(false) = true
diff --git a/crates/csa-executor/src/executor.rs b/crates/csa-executor/src/executor.rs
index 35332b2a..c07c624a 100644
--- a/crates/csa-executor/src/executor.rs
+++ b/crates/csa-executor/src/executor.rs
@@ -7,7 +7,6 @@ use csa_process::{ExecutionResult, StreamMode};
use csa_session::state::{MetaSessionState, ToolState};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
-use std::fs;
use std::path::{Path, PathBuf};
use tokio::process::Command;
@@ -16,6 +15,12 @@ use crate::transport::{
LegacyTransport, SandboxTransportConfig, Transport, TransportFactory, TransportOptions,
TransportResult,
};
+#[path = "executor_arg_helpers.rs"]
+mod arg_helpers;
+use arg_helpers::{
+ append_gemini_include_directories_args, codex_notify_suppression_args,
+ effective_gemini_model_override, gemini_include_directories,
+};
pub const MAX_ARGV_PROMPT_LEN: usize = 100 * 1024;
@@ -138,11 +143,6 @@ pub enum Executor {
}
impl Executor {
- const GEMINI_INCLUDE_DIRS_ENV_KEYS: &[&str] = &[
- "CSA_GEMINI_INCLUDE_DIRECTORIES",
- "GEMINI_INCLUDE_DIRECTORIES",
- ];
-
/// Get the tool name as a string.
pub fn tool_name(&self) -> &'static str {
match self {
@@ -321,11 +321,8 @@ impl Executor {
if let Some(env) = extra_env {
Self::inject_env(&mut cmd, env);
}
- let gemini_include_directories = Self::gemini_include_directories(
- extra_env,
- prompt,
- Some(Path::new(&session.project_path)),
- );
+ let gemini_include_directories =
+ gemini_include_directories(extra_env, prompt, Some(Path::new(&session.project_path)));
let (prompt_transport, stdin_data) = self.select_prompt_transport(prompt);
self.append_tool_args_with_transport(
&mut cmd,
@@ -462,15 +459,15 @@ impl Executor {
Self::inject_env(&mut cmd, env);
}
let gemini_include_directories =
- Self::gemini_include_directories(extra_env, prompt, Some(work_dir));
+ gemini_include_directories(extra_env, prompt, Some(work_dir));
self.append_yolo_args(&mut cmd);
self.append_model_args(&mut cmd);
if matches!(self, Self::GeminiCli { .. }) {
- Self::append_gemini_include_directories_args(&mut cmd, &gemini_include_directories);
+ append_gemini_include_directories_args(&mut cmd, &gemini_include_directories);
}
if matches!(self, Self::Codex { .. }) {
if let Some(env) = extra_env {
- cmd.args(Self::codex_notify_suppression_args(env));
+ cmd.args(codex_notify_suppression_args(env));
}
}
let (prompt_transport, stdin_data) = self.select_prompt_transport(prompt);
@@ -586,7 +583,7 @@ impl Executor {
// Yolo flag for gemini (other tools handle it in structural args above)
if matches!(self, Self::GeminiCli { .. }) {
cmd.arg("-y");
- Self::append_gemini_include_directories_args(cmd, gemini_include_directories);
+ append_gemini_include_directories_args(cmd, gemini_include_directories);
}
// Session resume
@@ -643,7 +640,7 @@ impl Executor {
model_override,
thinking_budget,
} => {
- if let Some(model) = Self::effective_gemini_model_override(model_override) {
+ if let Some(model) = effective_gemini_model_override(model_override) {
cmd.arg("-m").arg(model);
}
if let Some(budget) = thinking_budget {
@@ -702,162 +699,6 @@ impl Executor {
}
}
- /// `"default"` means "delegate model routing to gemini-cli", so omit `-m`.
- fn effective_gemini_model_override(model_override: &Option<String>) -> Option<&str> {
- model_override
- .as_deref()
- .map(str::trim)
- .filter(|model| !model.eq_ignore_ascii_case("default"))
- .filter(|model| !model.is_empty())
- }
-
- fn codex_notify_suppression_args(env: &HashMap<String, String>) -> Vec<String> {
- match env.get("CSA_SUPPRESS_NOTIFY").map(String::as_str) {
- Some("1") => vec!["-c".to_string(), "notify=[]".to_string()],
- _ => Vec::new(),
- }
- }
-
- fn gemini_include_directories(
- extra_env: Option<&HashMap<String, String>>,
- prompt: &str,
- execution_dir: Option<&Path>,
- ) -> Vec<String> {
- let mut directories = Vec::new();
-
- if let Some(dir) = execution_dir {
- Self::push_unique_directory_string(&mut directories, dir.to_string_lossy().as_ref());
- }
-
- if let Some(env) = extra_env {
- let raw = Self::GEMINI_INCLUDE_DIRS_ENV_KEYS
- .iter()
- .find_map(|key| env.get(*key))
- .map(String::as_str)
- .unwrap_or_default();
-
- for entry in raw.split([',', '\n']) {
- let directory = entry.trim();
- if directory.is_empty() {
- continue;
- }
- if Path::new(directory).is_relative() {
- if let Some(base) = execution_dir {
- let combined = base.join(directory);
- Self::push_unique_directory_string(
- &mut directories,
- combined.to_string_lossy().as_ref(),
- );
- } else {
- Self::push_unique_directory_string(&mut directories, directory);
- }
- continue;
- }
- Self::push_unique_directory_string(&mut directories, directory);
- }
- }
-
- for directory in Self::gemini_prompt_directories(prompt) {
- Self::push_unique_directory_string(&mut directories, &directory);
- }
-
- directories
- }
-
- fn gemini_prompt_directories(prompt: &str) -> Vec<String> {
- let mut directories = Vec::new();
- let tokens: Vec<String> = prompt
- .split_whitespace()
- .map(Self::trim_prompt_path_token)
- .filter(|token| !token.is_empty())
- .collect();
-
- let mut index = 0;
- while index < tokens.len() {
- if !tokens[index].starts_with('/') || tokens[index].contains("://") {
- index += 1;
- continue;
- }
-
- let mut candidate = String::new();
- let mut best_match: Option<(usize, PathBuf)> = None;
- for (end, token) in tokens.iter().enumerate().skip(index) {
- if end > index {
- if token.starts_with('/') {
- break;
- }
- candidate.push(' ');
- }
- candidate.push_str(token);
-
- let path = Path::new(&candidate);
- if path.is_absolute() && path.exists() {
- best_match = Some((end, path.to_path_buf()));
- }
- }
-
- if let Some((end, path)) = best_match {
- let dir = if path.is_dir() {
- path
- } else if let Some(parent) = path.parent() {
- parent.to_path_buf()
- } else {
- index += 1;
- continue;
- };
-
- let normalized = fs::canonicalize(&dir).unwrap_or(dir);
- Self::push_unique_directory_string(
- &mut directories,
- normalized.to_string_lossy().as_ref(),
- );
- index = end + 1;
- } else {
- index += 1;
- }
- }
- directories
- }
-
- fn trim_prompt_path_token(raw: &str) -> String {
- raw.trim_matches(|c: char| {
- matches!(
- c,
- '"' | '\''
- | '`'
- | ','
- | ';'
- | ':'
- | '.'
- | '('
- | ')'
- | '['
- | ']'
- | '{'
- | '}'
- | '<'
- | '>'
- )
- })
- .to_string()
- }
-
- fn push_unique_directory_string(directories: &mut Vec<String>, directory: &str) {
- if directory.is_empty() {
- return;
- }
- if directories.iter().any(|existing| existing == directory) {
- return;
- }
- directories.push(directory.to_string());
- }
-
- fn append_gemini_include_directories_args(cmd: &mut Command, directories: &[String]) {
- for directory in directories {
- cmd.arg("--include-directories").arg(directory);
- }
- }
-
/// Append "yolo" args (bypass approvals).
fn append_yolo_args(&self, cmd: &mut Command) {
for arg in self.yolo_args() {
diff --git a/crates/csa-executor/src/executor_arg_helpers.rs b/crates/csa-executor/src/executor_arg_helpers.rs
new file mode 100644
index 00000000..7aa46fdf
--- /dev/null
+++ b/crates/csa-executor/src/executor_arg_helpers.rs
@@ -0,0 +1,162 @@
+use std::collections::HashMap;
+use std::fs;
+use std::path::{Path, PathBuf};
+use tokio::process::Command;
+
+const GEMINI_INCLUDE_DIRS_ENV_KEYS: &[&str] = &[
+ "CSA_GEMINI_INCLUDE_DIRECTORIES",
+ "GEMINI_INCLUDE_DIRECTORIES",
+];
+
+/// `"default"` means "delegate model routing to gemini-cli", so omit `-m`.
+pub(crate) fn effective_gemini_model_override(model_override: &Option) -> Option<&str> {
+ model_override
+ .as_deref()
+ .map(str::trim)
+ .filter(|model| !model.eq_ignore_ascii_case("default"))
+ .filter(|model| !model.is_empty())
+}
+
+pub(crate) fn codex_notify_suppression_args(env: &HashMap<String, String>) -> Vec<String> {
+ match env.get("CSA_SUPPRESS_NOTIFY").map(String::as_str) {
+ Some("1") => vec!["-c".to_string(), "notify=[]".to_string()],
+ _ => Vec::new(),
+ }
+}
+
+pub(crate) fn gemini_include_directories(
+ extra_env: Option<&HashMap<String, String>>,
+ prompt: &str,
+ execution_dir: Option<&Path>,
+) -> Vec<String> {
+ let mut directories = Vec::new();
+
+ if let Some(dir) = execution_dir {
+ push_unique_directory_string(&mut directories, dir.to_string_lossy().as_ref());
+ }
+
+ if let Some(env) = extra_env {
+ let raw = GEMINI_INCLUDE_DIRS_ENV_KEYS
+ .iter()
+ .find_map(|key| env.get(*key))
+ .map(String::as_str)
+ .unwrap_or_default();
+
+ for entry in raw.split([',', '\n']) {
+ let directory = entry.trim();
+ if directory.is_empty() {
+ continue;
+ }
+ if Path::new(directory).is_relative() {
+ if let Some(base) = execution_dir {
+ let combined = base.join(directory);
+ push_unique_directory_string(
+ &mut directories,
+ combined.to_string_lossy().as_ref(),
+ );
+ } else {
+ push_unique_directory_string(&mut directories, directory);
+ }
+ continue;
+ }
+ push_unique_directory_string(&mut directories, directory);
+ }
+ }
+
+ for directory in gemini_prompt_directories(prompt) {
+ push_unique_directory_string(&mut directories, &directory);
+ }
+
+ directories
+}
+
+pub(crate) fn append_gemini_include_directories_args(cmd: &mut Command, directories: &[String]) {
+ for directory in directories {
+ cmd.arg("--include-directories").arg(directory);
+ }
+}
+
+fn gemini_prompt_directories(prompt: &str) -> Vec<String> {
+ let mut directories = Vec::new();
+ let tokens: Vec<String> = prompt
+ .split_whitespace()
+ .map(trim_prompt_path_token)
+ .filter(|token| !token.is_empty())
+ .collect();
+
+ let mut index = 0;
+ while index < tokens.len() {
+ if !tokens[index].starts_with('/') || tokens[index].contains("://") {
+ index += 1;
+ continue;
+ }
+
+ let mut candidate = String::new();
+ let mut best_match: Option<(usize, PathBuf)> = None;
+ for (end, token) in tokens.iter().enumerate().skip(index) {
+ if end > index {
+ if token.starts_with('/') {
+ break;
+ }
+ candidate.push(' ');
+ }
+ candidate.push_str(token);
+
+ let path = Path::new(&candidate);
+ if path.is_absolute() && path.exists() {
+ best_match = Some((end, path.to_path_buf()));
+ }
+ }
+
+ if let Some((end, path)) = best_match {
+ let dir = if path.is_dir() {
+ path
+ } else if let Some(parent) = path.parent() {
+ parent.to_path_buf()
+ } else {
+ index += 1;
+ continue;
+ };
+
+ let normalized = fs::canonicalize(&dir).unwrap_or(dir);
+ push_unique_directory_string(&mut directories, normalized.to_string_lossy().as_ref());
+ index = end + 1;
+ } else {
+ index += 1;
+ }
+ }
+ directories
+}
+
+fn trim_prompt_path_token(raw: &str) -> String {
+ raw.trim_matches(|c: char| {
+ matches!(
+ c,
+ '"' | '\''
+ | '`'
+ | ','
+ | ';'
+ | ':'
+ | '.'
+ | '('
+ | ')'
+ | '['
+ | ']'
+ | '{'
+ | '}'
+ | '<'
+ | '>'
+ )
+ })
+ .to_string()
+}
+
+fn push_unique_directory_string(directories: &mut Vec<String>, directory: &str) {
+ if directory.is_empty() {
+ return;
+ }
+ if directories.iter().any(|existing| existing == directory) {
+ return;
+ }
+ directories.push(directory.to_string());
+}
diff --git a/crates/csa-process/src/lib.rs b/crates/csa-process/src/lib.rs
index 51a9c256..09067336 100644
--- a/crates/csa-process/src/lib.rs
+++ b/crates/csa-process/src/lib.rs
@@ -12,7 +12,18 @@ use csa_resource::cgroup::SandboxConfig;
use csa_resource::sandbox::{SandboxCapability, detect_sandbox_capability};
mod idle_watchdog;
use idle_watchdog::should_terminate_for_idle;
+#[path = "lib_output_helpers.rs"]
+mod output_helpers;
mod tool_liveness;
+#[cfg(test)]
+use output_helpers::{DEFAULT_HEARTBEAT_SECS, HEARTBEAT_INTERVAL_ENV};
+use output_helpers::{
+ accumulate_and_flush_lines, accumulate_and_flush_stderr, extract_summary, failure_summary,
+ flush_line_buf, flush_stderr_buf, maybe_emit_heartbeat, resolve_heartbeat_interval,
+ spool_chunk,
+};
+#[cfg(test)]
+use output_helpers::{last_non_empty_line, truncate_line};
pub use tool_liveness::{DEFAULT_LIVENESS_DEAD_SECS, ToolLiveness};
#[cfg(feature = "codex-pty-fork")]
@@ -77,8 +88,6 @@ pub const DEFAULT_IDLE_TIMEOUT_SECS: u64 = 300;
pub const DEFAULT_STDIN_WRITE_TIMEOUT_SECS: u64 = 30;
pub const DEFAULT_TERMINATION_GRACE_PERIOD_SECS: u64 = 5;
const IDLE_POLL_INTERVAL: Duration = Duration::from_millis(200);
-const DEFAULT_HEARTBEAT_SECS: u64 = 20;
-const HEARTBEAT_INTERVAL_ENV: &str = "CSA_TOOL_HEARTBEAT_SECS";
/// Spawn-time process control options.
#[derive(Debug, Clone, Copy)]
@@ -650,121 +659,6 @@ pub async fn run_and_capture_with_stdin(
.await
}
-/// Write a raw byte chunk to the spool file and flush.
-///
-/// Best-effort: errors are silently ignored because the spool is a crash-recovery
-/// aid, not the primary output path.
-fn spool_chunk(spool: &mut Option<std::fs::File>, bytes: &[u8]) {
- if let Some(f) = spool {
- use std::io::Write;
- let _ = f.write_all(bytes);
- let _ = f.flush();
- }
-}
-
-fn resolve_heartbeat_interval() -> Option<Duration> {
- let raw = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
- let secs = match raw {
- Some(value) => match value.trim().parse::<u64>() {
- Ok(0) => return None,
- Ok(parsed) => parsed,
- Err(_) => DEFAULT_HEARTBEAT_SECS,
- },
- None => DEFAULT_HEARTBEAT_SECS,
- };
- Some(Duration::from_secs(secs))
-}
-
-fn maybe_emit_heartbeat(
- heartbeat_interval: Option<Duration>,
- execution_start: Instant,
- last_activity: Instant,
- last_heartbeat: &mut Instant,
- idle_timeout: Duration,
-) {
- let Some(interval) = heartbeat_interval else {
- return;
- };
-
- let now = Instant::now();
- let idle_for = now.saturating_duration_since(last_activity);
- if idle_for < interval {
- return;
- }
- if now.saturating_duration_since(*last_heartbeat) < interval {
- return;
- }
-
- let elapsed = now.saturating_duration_since(execution_start);
- eprintln!(
- "[csa-heartbeat] tool still running: elapsed={}s idle={}s idle-timeout={}s",
- elapsed.as_secs(),
- idle_for.as_secs(),
- idle_timeout.as_secs()
- );
- *last_heartbeat = now;
-}
-
-/// Accumulate a chunk of bytes into a line buffer, flushing complete lines to output.
-///
-/// When a `\n` is found, the complete line (including `\n`) is appended to `output`
-/// and optionally tee'd to stderr. Partial data remains in `line_buf` until more
-/// data arrives or EOF triggers `flush_line_buf`.
-fn accumulate_and_flush_lines(
- chunk: &str,
- line_buf: &mut String,
- output: &mut String,
- stream_mode: StreamMode,
-) {
- line_buf.push_str(chunk);
- while let Some(newline_pos) = line_buf.find('\n') {
- let line: String = line_buf.drain(..=newline_pos).collect();
- if stream_mode == StreamMode::TeeToStderr {
- eprint!("[stdout] {line}");
- }
- output.push_str(&line);
- }
-}
-
-/// Flush any remaining partial line from the stdout line buffer on EOF.
-fn flush_line_buf(line_buf: &mut String, output: &mut String, stream_mode: StreamMode) {
- if !line_buf.is_empty() {
- if stream_mode == StreamMode::TeeToStderr {
- eprint!("[stdout] {line_buf}");
- }
- output.push_str(line_buf);
- line_buf.clear();
- }
-}
-
-/// Accumulate stderr chunk, flushing complete lines in real-time.
-fn accumulate_and_flush_stderr(
- chunk: &str,
- line_buf: &mut String,
- stderr_output: &mut String,
- stream_mode: StreamMode,
-) {
- line_buf.push_str(chunk);
- while let Some(newline_pos) = line_buf.find('\n') {
- let line: String = line_buf.drain(..=newline_pos).collect();
- if stream_mode == StreamMode::TeeToStderr {
- eprint!("{line}");
- }
- stderr_output.push_str(&line);
- }
-}
-
-/// Flush any remaining partial stderr line on EOF.
-fn flush_stderr_buf(line_buf: &mut String, stderr_output: &mut String, stream_mode: StreamMode) {
- if !line_buf.is_empty() {
- if stream_mode == StreamMode::TeeToStderr {
- eprint!("{line_buf}");
- }
- stderr_output.push_str(line_buf);
- line_buf.clear();
- }
-}
-
async fn terminate_child_process_group(
child: &mut tokio::process::Child,
termination_grace_period: Duration,
@@ -809,49 +703,9 @@ pub async fn check_tool_installed(executable: &str) -> Result<()> {
Ok(())
}
-/// Extract summary from output (last non-empty line, truncated to 200 chars).
-fn extract_summary(output: &str) -> String {
- truncate_line(last_non_empty_line(output), 200)
-}
-
-/// Build summary for failed executions (exit_code != 0).
-///
-/// Priority chain:
-/// 1. stdout last non-empty line (if present — some tools write errors to stdout)
-/// 2. stderr last non-empty line (fallback for tools that write errors to stderr)
-/// 3. `"exit code {N}"` (final fallback when both streams are empty)
-fn failure_summary(stdout: &str, stderr: &str, exit_code: i32) -> String {
- let stdout_line = last_non_empty_line(stdout);
- if !stdout_line.is_empty() {
- return truncate_line(stdout_line, 200);
- }
-
- let stderr_line = last_non_empty_line(stderr);
- if !stderr_line.is_empty() {
- return truncate_line(stderr_line, 200);
- }
-
- format!("exit code {exit_code}")
-}
-
-/// Return the last non-empty line from the given text, or `""` if none.
-fn last_non_empty_line(text: &str) -> &str {
- text.lines()
- .rev()
- .find(|line| !line.trim().is_empty())
- .unwrap_or("")
-}
-
-/// Truncate a line to `max_chars` characters, appending "..." if truncated.
-fn truncate_line(line: &str, max_chars: usize) -> String {
- if line.chars().nth(max_chars).is_none() {
- line.to_string()
- } else {
- let truncated: String = line.chars().take(max_chars - 3).collect();
- format!("{truncated}...")
- }
-}
-
#[cfg(test)]
#[path = "lib_tests.rs"]
mod tests;
+#[cfg(test)]
+#[path = "lib_tests_heartbeat.rs"]
+mod tests_heartbeat;
diff --git a/crates/csa-process/src/lib_output_helpers.rs b/crates/csa-process/src/lib_output_helpers.rs
new file mode 100644
index 00000000..b401e7d3
--- /dev/null
+++ b/crates/csa-process/src/lib_output_helpers.rs
@@ -0,0 +1,167 @@
+use super::StreamMode;
+use std::time::{Duration, Instant};
+
+pub(super) const DEFAULT_HEARTBEAT_SECS: u64 = 20;
+pub(super) const HEARTBEAT_INTERVAL_ENV: &str = "CSA_TOOL_HEARTBEAT_SECS";
+
+/// Write a raw byte chunk to the spool file and flush.
+///
+/// Best-effort: errors are silently ignored because the spool is a crash-recovery
+/// aid, not the primary output path.
+pub(super) fn spool_chunk(spool: &mut Option<std::fs::File>, bytes: &[u8]) {
+ if let Some(f) = spool {
+ use std::io::Write;
+ let _ = f.write_all(bytes);
+ let _ = f.flush();
+ }
+}
+
+pub(super) fn resolve_heartbeat_interval() -> Option<Duration> {
+ let raw = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
+ let secs = match raw {
+ Some(value) => match value.trim().parse::<u64>() {
+ Ok(0) => return None,
+ Ok(parsed) => parsed,
+ Err(_) => DEFAULT_HEARTBEAT_SECS,
+ },
+ None => DEFAULT_HEARTBEAT_SECS,
+ };
+ Some(Duration::from_secs(secs))
+}
+
+pub(super) fn maybe_emit_heartbeat(
+ heartbeat_interval: Option<Duration>,
+ execution_start: Instant,
+ last_activity: Instant,
+ last_heartbeat: &mut Instant,
+ idle_timeout: Duration,
+) {
+ let Some(interval) = heartbeat_interval else {
+ return;
+ };
+
+ let now = Instant::now();
+ let idle_for = now.saturating_duration_since(last_activity);
+ if idle_for < interval {
+ return;
+ }
+ if now.saturating_duration_since(*last_heartbeat) < interval {
+ return;
+ }
+
+ let elapsed = now.saturating_duration_since(execution_start);
+ eprintln!(
+ "[csa-heartbeat] tool still running: elapsed={}s idle={}s idle-timeout={}s",
+ elapsed.as_secs(),
+ idle_for.as_secs(),
+ idle_timeout.as_secs()
+ );
+ *last_heartbeat = now;
+}
+
+/// Accumulate a chunk of bytes into a line buffer, flushing complete lines to output.
+///
+/// When a `\n` is found, the complete line (including `\n`) is appended to `output`
+/// and optionally tee'd to stderr. Partial data remains in `line_buf` until more
+/// data arrives or EOF triggers `flush_line_buf`.
+pub(super) fn accumulate_and_flush_lines(
+ chunk: &str,
+ line_buf: &mut String,
+ output: &mut String,
+ stream_mode: StreamMode,
+) {
+ line_buf.push_str(chunk);
+ while let Some(newline_pos) = line_buf.find('\n') {
+ let line: String = line_buf.drain(..=newline_pos).collect();
+ if stream_mode == StreamMode::TeeToStderr {
+ eprint!("[stdout] {line}");
+ }
+ output.push_str(&line);
+ }
+}
+
+/// Flush any remaining partial line from the stdout line buffer on EOF.
+pub(super) fn flush_line_buf(line_buf: &mut String, output: &mut String, stream_mode: StreamMode) {
+ if !line_buf.is_empty() {
+ if stream_mode == StreamMode::TeeToStderr {
+ eprint!("[stdout] {line_buf}");
+ }
+ output.push_str(line_buf);
+ line_buf.clear();
+ }
+}
+
+/// Accumulate stderr chunk, flushing complete lines in real-time.
+pub(super) fn accumulate_and_flush_stderr(
+ chunk: &str,
+ line_buf: &mut String,
+ stderr_output: &mut String,
+ stream_mode: StreamMode,
+) {
+ line_buf.push_str(chunk);
+ while let Some(newline_pos) = line_buf.find('\n') {
+ let line: String = line_buf.drain(..=newline_pos).collect();
+ if stream_mode == StreamMode::TeeToStderr {
+ eprint!("{line}");
+ }
+ stderr_output.push_str(&line);
+ }
+}
+
+/// Flush any remaining partial stderr line on EOF.
+pub(super) fn flush_stderr_buf(
+ line_buf: &mut String,
+ stderr_output: &mut String,
+ stream_mode: StreamMode,
+) {
+ if !line_buf.is_empty() {
+ if stream_mode == StreamMode::TeeToStderr {
+ eprint!("{line_buf}");
+ }
+ stderr_output.push_str(line_buf);
+ line_buf.clear();
+ }
+}
+
+/// Extract summary from output (last non-empty line, truncated to 200 chars).
+pub(super) fn extract_summary(output: &str) -> String {
+ truncate_line(last_non_empty_line(output), 200)
+}
+
+/// Build summary for failed executions (exit_code != 0).
+///
+/// Priority chain:
+/// 1. stdout last non-empty line (if present — some tools write errors to stdout)
+/// 2. stderr last non-empty line (fallback for tools that write errors to stderr)
+/// 3. `"exit code {N}"` (final fallback when both streams are empty)
+pub(super) fn failure_summary(stdout: &str, stderr: &str, exit_code: i32) -> String {
+ let stdout_line = last_non_empty_line(stdout);
+ if !stdout_line.is_empty() {
+ return truncate_line(stdout_line, 200);
+ }
+
+ let stderr_line = last_non_empty_line(stderr);
+ if !stderr_line.is_empty() {
+ return truncate_line(stderr_line, 200);
+ }
+
+ format!("exit code {exit_code}")
+}
+
+/// Return the last non-empty line from the given text, or `""` if none.
+pub(super) fn last_non_empty_line(text: &str) -> &str {
+ text.lines()
+ .rev()
+ .find(|line| !line.trim().is_empty())
+ .unwrap_or("")
+}
+
+/// Truncate a line to `max_chars` characters, appending "..." if truncated.
+pub(super) fn truncate_line(line: &str, max_chars: usize) -> String {
+ if line.chars().nth(max_chars).is_none() {
+ line.to_string()
+ } else {
+ let truncated: String = line.chars().take(max_chars - 3).collect();
+ format!("{truncated}...")
+ }
+}
diff --git a/crates/csa-process/src/lib_tests.rs b/crates/csa-process/src/lib_tests.rs
index 1f56d83c..3d30b199 100644
--- a/crates/csa-process/src/lib_tests.rs
+++ b/crates/csa-process/src/lib_tests.rs
@@ -1,17 +1,4 @@
use super::*;
-use std::sync::{LazyLock, Mutex};
-
-static HEARTBEAT_ENV_LOCK: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
-
-fn restore_env_var(key: &str, original: Option<String>) {
- // SAFETY: test-scoped env mutation guarded by a process-wide mutex.
- unsafe {
- match original {
- Some(value) => std::env::set_var(key, value),
- None => std::env::remove_var(key),
- }
- }
-}
#[test]
fn test_extract_summary_empty() {
@@ -603,32 +590,6 @@ fn test_stream_mode_debug_format() {
assert_eq!(format!("{:?}", StreamMode::TeeToStderr), "TeeToStderr");
}
-#[test]
-fn test_resolve_heartbeat_interval_default_enabled() {
- let _env_lock = HEARTBEAT_ENV_LOCK
- .lock()
- .expect("heartbeat env lock poisoned");
- let original = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
- // SAFETY: test-scoped env mutation, restored immediately.
- unsafe { std::env::remove_var(HEARTBEAT_INTERVAL_ENV) };
- let resolved = resolve_heartbeat_interval();
- restore_env_var(HEARTBEAT_INTERVAL_ENV, original);
- assert_eq!(resolved, Some(Duration::from_secs(DEFAULT_HEARTBEAT_SECS)));
-}
-
-#[test]
-fn test_resolve_heartbeat_interval_disable_with_zero() {
- let _env_lock = HEARTBEAT_ENV_LOCK
- .lock()
- .expect("heartbeat env lock poisoned");
- let original = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
- // SAFETY: test-scoped env mutation, restored immediately.
- unsafe { std::env::set_var(HEARTBEAT_INTERVAL_ENV, "0") };
- let resolved = resolve_heartbeat_interval();
- restore_env_var(HEARTBEAT_INTERVAL_ENV, original);
- assert_eq!(resolved, None);
-}
-
#[tokio::test]
async fn test_buffer_only_captures_stdout_without_tee() {
let mut cmd = Command::new("echo");
diff --git a/crates/csa-process/src/lib_tests_heartbeat.rs b/crates/csa-process/src/lib_tests_heartbeat.rs
new file mode 100644
index 00000000..db1f26bc
--- /dev/null
+++ b/crates/csa-process/src/lib_tests_heartbeat.rs
@@ -0,0 +1,40 @@
+use super::*;
+use std::sync::{LazyLock, Mutex};
+
+static HEARTBEAT_ENV_LOCK: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
+
+fn restore_env_var(key: &str, original: Option<String>) {
+ // SAFETY: test-scoped env mutation guarded by a process-wide mutex.
+ unsafe {
+ match original {
+ Some(value) => std::env::set_var(key, value),
+ None => std::env::remove_var(key),
+ }
+ }
+}
+
+#[test]
+fn test_resolve_heartbeat_interval_default_enabled() {
+ let _env_lock = HEARTBEAT_ENV_LOCK
+ .lock()
+ .expect("heartbeat env lock poisoned");
+ let original = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
+ // SAFETY: test-scoped env mutation, restored immediately.
+ unsafe { std::env::remove_var(HEARTBEAT_INTERVAL_ENV) };
+ let resolved = resolve_heartbeat_interval();
+ restore_env_var(HEARTBEAT_INTERVAL_ENV, original);
+ assert_eq!(resolved, Some(Duration::from_secs(DEFAULT_HEARTBEAT_SECS)));
+}
+
+#[test]
+fn test_resolve_heartbeat_interval_disable_with_zero() {
+ let _env_lock = HEARTBEAT_ENV_LOCK
+ .lock()
+ .expect("heartbeat env lock poisoned");
+ let original = std::env::var(HEARTBEAT_INTERVAL_ENV).ok();
+ // SAFETY: test-scoped env mutation, restored immediately.
+ unsafe { std::env::set_var(HEARTBEAT_INTERVAL_ENV, "0") };
+ let resolved = resolve_heartbeat_interval();
+ restore_env_var(HEARTBEAT_INTERVAL_ENV, original);
+ assert_eq!(resolved, None);
+}
diff --git a/patterns/commit/skills/commit/SKILL.md b/patterns/commit/skills/commit/SKILL.md
index b61aeaa4..d6462189 100644
--- a/patterns/commit/skills/commit/SKILL.md
+++ b/patterns/commit/skills/commit/SKILL.md
@@ -76,7 +76,7 @@ Both layers are mandatory. The per-commit review catches issues in each individu
- **Depends on**: `security-audit` (Step 5), `ai-reviewed-commit` (Step 6)
- **Triggers**: `pr-codex-bot` (Step 9, when milestone)
-- **Used by**: `mktsk` (as commit step after each implementation task), `dev-to-merge`
+- **Used by**: `mktsk` (as commit step after each implementation task), `dev2merge`, `dev-to-merge` (legacy alias)
## Done Criteria
diff --git a/patterns/dev-to-merge/FINDINGS.md b/patterns/dev-to-merge/FINDINGS.md
index a97e583f..7314d17b 100644
--- a/patterns/dev-to-merge/FINDINGS.md
+++ b/patterns/dev-to-merge/FINDINGS.md
@@ -2,166 +2,48 @@
## Summary
-The skill-lang format successfully expressed a 24-step development workflow
-covering branch validation, quality gates, commit, PR creation, codex-bot
-review loop with false-positive arbitration, and merge. The weave compiler
-produced a valid TOML execution plan.
-
-## What Worked Well
-
-1. **Variable extraction**: The compiler correctly identified all 10 variables
- (`BRANCH`, `REPO`, `SCOPE`, `COMMIT_MSG`, `REVIEW_HAS_ISSUES`,
- `BOT_HAS_ISSUES`, `BOT_COMMENTS`, `COMMENT_IS_FALSE_POSITIVE`,
- `COMMENT_TEXT`, `PR_NUM`) from `${VAR}` placeholders across the document.
-
-2. **Control flow compilation**: `## IF` / `## ELSE` / `## ENDIF` blocks
- compiled correctly. Conditional steps received `condition` fields
- (`${REVIEW_HAS_ISSUES}`, `${BOT_HAS_ISSUES}`) and ELSE branches received
- negated conditions (`!(${BOT_HAS_ISSUES})`).
-
-3. **FOR loop compilation**: The `## FOR comment IN ${BOT_COMMENTS}` block
- produced steps with `loop_var` containing both `variable` and `collection`,
- correctly tagging all steps inside the loop body.
-
-4. **Nested control flow**: IF inside FOR compiled correctly — the comment
- processing loop contains a conditional arbitration/fix branch.
-
-5. **Hint extraction**: `Tool:`, `Tier:`, and `OnFail:` metadata lines were
- correctly extracted from step bodies and promoted to structured fields.
-
-6. **FailAction variants**: `abort`, `skip`, and `retry N` all serialized
- correctly with the appropriate TOML representation.
-
-## Limitations Found
-
-### 1. Markdown Headers Inside Code Blocks Are Parsed as Steps
-
-**Severity**: High
-
-The `## Summary` and `## Test plan` headers inside a heredoc/code fence in
-Step 15 (Create PR) were misinterpreted as new step headers. This split the
-PR creation step into 3 separate steps (15, 16, 17) with broken content.
-
-**Root cause**: The parser classifies lines by regex before considering code
-fence context. Lines starting with `## ` inside triple-backtick blocks are
-treated as step headers.
-
-**Fix needed**: Track fenced code block state (```` ``` ````) in the line
-classifier and skip structural parsing inside fences.
-
-### 2. No Step Dependency Graph
-
-**Severity**: Medium
-
-The compiled plan has `depends_on: []` for every step. Sequential ordering
-is implicit (by `id`) but the compiler does not infer dependencies from
-variable flow (e.g., Step 13 depends on Step 12 producing `COMMIT_MSG`).
-
-**Fix needed**: Data-flow analysis to populate `depends_on` from variable
-producers/consumers, enabling parallel execution of independent steps.
-
-### 3. No Loop-Back / Retry-Loop Semantics
-
-**Severity**: Medium
-
-The review-fix-re-review cycle (Steps 9-11) is expressed as a flat IF block,
-but the semantic intent is a bounded retry loop: "loop back to Step 9 if
-issues persist (max 3 rounds)." The skill-lang has no `## WHILE` or
-`## LOOP ... UNTIL` construct.
-
-**Workaround**: Express as `## FOR round IN [1,2,3]` with an IF guard, but
-this is awkward and does not support early exit on success.
-
-**Suggestion**: Add `## WHILE condition` / `## ENDWHILE` with max-iteration
-guard, or `## RETRY max_count` block.
-
-### 4. No Step Output Binding
-
-**Severity**: Medium
-
-Step 12 (Generate Commit Message) should produce a value that Step 13
-consumes as `${COMMIT_MSG}`. The current format has no way to declare that
-a step *produces* a variable value — variables are only consumed.
-
-**Fix needed**: Add `Output: ${VAR_NAME}` hint (similar to `Tool:`/`Tier:`)
-so the runtime knows which step populates which variable.
-
-### 5. No INCLUDE Resolution at Compile Time
-
-**Severity**: Low
-
-`## INCLUDE` blocks produce a placeholder step with `tool = "weave"`. The
-compiler does not resolve includes transitively. This is fine for now (the
-runtime would handle it) but means the compiled plan may have unresolved
-references.
-
-### 6. Condition Expressions Are Opaque Strings
-
-**Severity**: Low
-
-Conditions like `${REVIEW_HAS_ISSUES}` and `${COMMENT_IS_FALSE_POSITIVE}`
-are stored as raw strings. The compiler performs no validation on whether
-these are boolean-evaluable or reference defined variables. A typo like
-`${REVEW_HAS_ISSUES}` would silently compile.
-
-**Fix needed**: Validate that all variables in conditions appear in the
-extracted variable set. Optionally support simple expressions (`==`, `!=`,
-`&&`, `||`).
-
-## Suggestions for Runtime Executor Design
-
-1. **Variable resolution**: The executor needs a variable store. Steps with
- `Output: ${VAR}` hints populate the store; `${VAR}` in prompts are
- template-substituted before execution.
-
-2. **Condition evaluation**: A minimal expression evaluator for conditions.
- At minimum: truthy/falsy (non-empty = true), negation (`!(...)`), and
- equality (`== "value"`).
-
-3. **Loop execution**: FOR blocks re-execute their child steps once per
- collection element, binding the loop variable.
-
-4. **Failure handling**: `on_fail` determines behavior on step failure:
- - `abort` → halt the plan
- - `skip` → continue to next step
- - `retry N` → re-execute up to N times
- - `delegate target` → hand off to a different tool/tier
-
-5. **INCLUDE resolution**: At runtime, resolve `## INCLUDE` by loading and
- compiling the referenced PATTERN.md, then splicing its steps into the
- current plan.
-
-6. **Code fence awareness**: The parser should track fenced code blocks to
- avoid misinterpreting headers inside code as structural elements.
-
-## Post-Review Fixes
-
-After local review (csa review --branch main) flagged 3 P1 and 1 P2 issues:
-
-1. **CR-001 (P1)**: Restructured Step 15 (Create PR) to avoid `##` headers
- inside the step body. Replaced the embedded heredoc with a `${PR_BODY}`
- variable reference. Compiled plan now has exactly 24 steps (was 26).
-
-2. **CR-002 (P1)**: Quoted all `${BRANCH}` references in shell push commands
- (`"${BRANCH}"`) to prevent command injection via branch names with
- shell metacharacters.
-
-3. **CR-003 (P1)**: Added explicit NOTE comments to Steps 13 and 16 stating
- that production usage should invoke `/commit` and `/pr-codex-bot` skills
- per AGENTS.md rule 015. The raw commands here demonstrate skill-lang only.
-
-4. **CR-004 (P2)**: Swapped Step 5 (Security Scan) and Step 6 (Stage Changes)
- so staging occurs first, ensuring `git diff --cached` has content to scan.
-
-## Statistics
-
-| Metric | Value |
-|--------|-------|
-| Source lines (PATTERN.md) | ~190 |
-| Compiled steps | 24 |
-| Variables extracted | 11 |
-| IF blocks | 3 (review issues, bot issues, false positive) |
-| FOR blocks | 1 (iterate bot comments) |
-| Nested IF-in-FOR | 1 |
-| Unique tools referenced | 4 (bash, csa, claude-code, weave) |
-| Tiers referenced | 3 (tier-1-quick, tier-2-standard, tier-3-complex) |
+`dev-to-merge` is maintained as a backward-compatible alias of `dev2merge`.
+It compiles successfully and reflects the same 27-step branch-to-merge
+workflow, including mandatory `mktd` planning/debate gate and the codex-bot
+review loop.
+
+## Current Workflow Shape
+
+1. Validate branch safety (no direct work on protected branch).
+2. Enforce planning gate through `mktd`, then verify TODO artifacts
+ (checkbox tasks + `DONE WHEN`).
+3. Run `just fmt`, `just clippy`, and `just test`.
+4. Stage changes with lockfile-aware guardrails.
+5. Run security scan + `security-audit` gate.
+6. Run local review (`csa review --diff`) and fix loop when needed.
+7. Generate commit message and commit.
+8. Push branch, create PR, and trigger cloud codex review.
+9. Poll review response (inline comments + PR comments + reviews).
+10. If findings exist: evaluate, arbitrate disputed items via debate,
+ fix, rerun local review, push, retrigger bot.
+11. If clean: merge PR.
+
+## Key Improvements Captured
+
+- Kept behavior aligned with `dev2merge` while preserving legacy command
+ compatibility.
+- Added mandatory mktd planning gate before development gates.
+- Migrated review handling from per-comment loop to consolidated analysis
+ steps.
+- Hardened repository resolution with `gh repo view` primary path and
+ remote URL fallback, including `.git` suffix normalization.
+- Added top-level PR comments polling to reduce missed bot findings.
+- Added explicit branch detection guards before push operations.
+
+## Known Tradeoffs
+
+- `REPO_LOCAL` resolution block is duplicated across several bash steps for
+ step-level self-containment.
+- Bot identity detection still depends on login-name heuristics and may need
+ tuning when external naming changes.
+
+## Validation Snapshot
+
+- `weave compile` succeeds for `patterns/dev-to-merge/PATTERN.md`.
+- Local gates expected by this pattern are runnable and integrated.
+- Alias remains functionally synchronized with `dev2merge`.
diff --git a/patterns/dev-to-merge/PATTERN.md b/patterns/dev-to-merge/PATTERN.md
index 8bbfbfb8..6b0ba6e4 100644
--- a/patterns/dev-to-merge/PATTERN.md
+++ b/patterns/dev-to-merge/PATTERN.md
@@ -1,6 +1,6 @@
---
name = "dev-to-merge"
-description = "Full development cycle from branch creation through commit, PR, codex-bot review, and merge"
+description = "Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge"
allowed-tools = "Bash, Read, Edit, Write, Grep, Glob, Task"
tier = "tier-3-complex"
version = "0.1.0"
@@ -10,7 +10,8 @@ version = "0.1.0"
End-to-end development workflow: implement code on a feature branch, pass all
quality gates, commit with Conventional Commits, create a PR, run codex-bot
-review loop, and merge to main.
+review loop, and merge to main. Planning is mandatory via `mktd`, and `mktd`
+internally requires adversarial `debate` evidence.
## Step 1: Validate Branch
@@ -21,7 +22,11 @@ Verify the current branch is a feature branch, not a protected branch.
If on main or dev, abort immediately.
```bash
-BRANCH="${BRANCH}"
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch."
+ exit 1
+fi
DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@')
if [ -z "$DEFAULT_BRANCH" ]; then DEFAULT_BRANCH="main"; fi
if [ "$BRANCH" = "$DEFAULT_BRANCH" ] || [ "$BRANCH" = "dev" ]; then
@@ -30,6 +35,45 @@ if [ "$BRANCH" = "$DEFAULT_BRANCH" ] || [ "$BRANCH" = "dev" ]; then
fi
```
+## Step 1.5: Plan with mktd (Debate Required)
+
+Tool: bash
+OnFail: abort
+
+Generate or refresh a branch TODO plan through `mktd` before development gates.
+This step MUST pass through mktd's built-in debate phase and save a TODO.
+
+## INCLUDE mktd
+
+```bash
+set -euo pipefail
+CURRENT_BRANCH="$(git branch --show-current)"
+FEATURE_INPUT="${SCOPE:-current branch changes pending merge}"
+MKTD_PROMPT="Plan dev-to-merge execution for branch ${CURRENT_BRANCH}. Scope: ${FEATURE_INPUT}. Must execute full mktd workflow and save TODO."
+set +e
+MKTD_OUTPUT="$(csa run --skill mktd "${MKTD_PROMPT}" 2>&1)"
+MKTD_STATUS=$?
+set -e
+printf '%s\n' "${MKTD_OUTPUT}"
+if [ "${MKTD_STATUS}" -ne 0 ]; then
+ echo "ERROR: mktd failed (exit=${MKTD_STATUS})." >&2
+ exit 1
+fi
+LATEST_TS="$(csa todo list --format json | jq -r --arg br "${CURRENT_BRANCH}" '[.[] | select(.branch == $br)] | sort_by(.timestamp) | last | .timestamp // empty')"
+if [ -z "${LATEST_TS}" ]; then
+ echo "ERROR: mktd did not produce a TODO for branch ${CURRENT_BRANCH}." >&2
+ exit 1
+fi
+TODO_PATH="$(csa todo show -t "${LATEST_TS}" --path)"
+if [ ! -s "${TODO_PATH}" ]; then
+ echo "ERROR: TODO file is empty: ${TODO_PATH}" >&2
+ exit 1
+fi
+grep -qF -- '- [ ] ' "${TODO_PATH}" || { echo "ERROR: TODO missing checkbox tasks: ${TODO_PATH}" >&2; exit 1; }
+grep -q 'DONE WHEN:' "${TODO_PATH}" || { echo "ERROR: TODO missing DONE WHEN clauses: ${TODO_PATH}" >&2; exit 1; }
+printf 'MKTD_TODO_TIMESTAMP=%s\nMKTD_TODO_PATH=%s\n' "${LATEST_TS}" "${TODO_PATH}"
+```
+
## Step 2: Run Formatters
Tool: bash
@@ -74,12 +118,12 @@ Verify no untracked files remain.
git add -A
if ! printf '%s' "${SCOPE:-}" | grep -Eqi 'release|version|lock|deps|dependency'; then
STAGED_FILES="$(git diff --cached --name-only)"
- if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo\.toml$|(^|/)package\.json$|(^|/)pnpm-workspace\.yaml$|(^|/)go\.mod$'; then
+ if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo[.]toml$|(^|/)package[.]json$|(^|/)pnpm-workspace[.]yaml$|(^|/)go[.]mod$'; then
echo "INFO: Dependency manifest change detected; preserving staged lockfiles."
- elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo\.lock|package-lock\.json|pnpm-lock\.yaml|yarn\.lock|go\.sum)$' | grep -q .; then
+ elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' | grep -q .; then
echo "INFO: Lockfile-only staged change detected; preserving staged lockfiles."
else
- MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | awk '$0 ~ /(^|\/)(Cargo\.lock|package-lock\.json|pnpm-lock\.yaml|yarn\.lock|go\.sum)$/ { print }')"
+ MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | grep -E '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' || true)"
if [ -n "${MATCHED_LOCKFILES}" ]; then
printf '%s\n' "${MATCHED_LOCKFILES}" | while read -r lockpath; do
echo "INFO: Unstaging incidental lockfile change: ${lockpath}"
@@ -223,10 +267,15 @@ scripts/gen_commit_msg.sh "${SCOPE:-}"
Tool: bash
OnFail: abort
-Create the commit using the generated message: ${COMMIT_MSG}.
+Create the commit using the generated message from Step 12.
```bash
-git commit -m "${COMMIT_MSG}"
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: Commit message is empty. Step 12 must output a commit message." >&2
+ exit 1
+fi
+git commit -m "${COMMIT_MSG_LOCAL}"
```
## Step 14: Ensure Version Bumped
@@ -290,6 +339,11 @@ OnFail: retry 2
Push the feature branch to the remote origin.
```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
git push -u origin "${BRANCH}"
```
@@ -303,7 +357,31 @@ of changes for ${SCOPE} and a test plan checklist covering tests, linting,
security audit, and codex review.
```bash
-gh pr create --base main --title "${COMMIT_MSG}" --body "${PR_BODY}"
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: PR title is empty. Step 12 output is required." >&2
+ exit 1
+fi
+PR_BODY_LOCAL="${PR_BODY:-## Summary
+- Scope: ${SCOPE:-unspecified}
+
+## Validation
+- just fmt
+- just clippy
+- just test
+- csa review --range main...HEAD
+}"
+gh pr create --base main --repo "${REPO_LOCAL}" --title "${COMMIT_MSG_LOCAL}" --body "${PR_BODY_LOCAL}"
```
## Step 18: Trigger Codex Bot Review
@@ -315,94 +393,114 @@ Capture the PR number for polling.
```bash
set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
PR_NUM=$(gh pr view --json number -q '.number')
-gh pr comment "${PR_NUM}" --repo "${REPO}" --body "@codex review"
+COMMENT_URL="$(gh pr comment "${PR_NUM}" --repo "${REPO_LOCAL}" --body "@codex review")"
SELF_LOGIN=$(gh api user -q '.login')
-COMMENTS_PAYLOAD=$(gh pr view "${PR_NUM}" --repo "${REPO}" --json comments)
-TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.comments[]? | select(.author.login == $me and .body == "@codex review")] | sort_by(.createdAt) | last | .createdAt // empty')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
if [ -z "${TRIGGER_TS}" ]; then
TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
fi
-printf 'PR_NUM=%s\nTRIGGER_TS=%s\n' "${PR_NUM}" "${TRIGGER_TS}"
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
```
## Step 19: Poll for Bot Response
Tool: bash
-OnFail: skip
+OnFail: abort
-Poll for bot review response with a bounded timeout (max 10 minutes).
-If the bot does not respond, fall through to UNAVAILABLE handling.
+Poll for bot review response with a bounded timeout (max 20 minutes).
+Output `1` when bot findings are present; output empty string otherwise.
```bash
-TIMEOUT=600; INTERVAL=30; ELAPSED=0
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
TRIGGER_TS="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
if [ -z "${PR_NUM_FROM_STEP}" ]; then PR_NUM_FROM_STEP="${PR_NUM}"; fi
if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
- PAYLOAD=$(gh pr view "${PR_NUM_FROM_STEP}" --repo "${REPO}" --json comments,reviews)
- BOT_COMMENTS=$(printf '%s' "${PAYLOAD}" | jq -r --arg ts "${TRIGGER_TS}" '[.comments[]? | select(.createdAt >= $ts and (.author.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
- BOT_REVIEWS=$(printf '%s' "${PAYLOAD}" | jq -r --arg ts "${TRIGGER_TS}" '[.reviews[]? | select(.submittedAt >= $ts and (.author.login | ascii_downcase | test("codex|bot|connector")))] | length')
- if [ "${BOT_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ]; then
- echo "Bot response received."
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
exit 0
fi
sleep "$INTERVAL"
ELAPSED=$((ELAPSED + INTERVAL))
done
-echo "Bot did not respond within timeout."
+echo "ERROR: Timed out waiting for bot response." >&2
exit 1
```
-## IF ${BOT_HAS_ISSUES}
+## IF ${STEP_19_OUTPUT}
## Step 20: Evaluate Bot Comments
Tool: csa
Tier: tier-2-standard
-For each bot comment, classify as:
-- Category A (already fixed): react and acknowledge
-- Category B (suspected false positive): queue for arbitration
-- Category C (real issue): react and queue for fix
-
-## FOR comment IN ${BOT_COMMENTS}
+Evaluate all inline bot findings on the PR and produce a consolidated action plan.
+List suspected false positives and real defects separately.
-## Step 21: Process Comment
+## Step 21: Arbitrate Disputed Findings
Tool: csa
-Evaluate this specific bot comment against the current code state.
-Determine category (A/B/C) and take appropriate action.
+For disputed findings, run independent arbitration using `csa debate` and
+produce a verdict for each disputed item.
-## IF ${COMMENT_IS_FALSE_POSITIVE}
-
-## Step 22: Arbitrate False Positive
+## Step 22: Fix Confirmed Issues
Tool: csa
Tier: tier-2-standard
-Run `csa debate` to get an independent second opinion on the suspected
-false positive. The arbiter MUST be a different model family.
+Implement fixes for confirmed bot findings and create commit(s) with clear
+messages. Do not modify unrelated files.
-```bash
-csa debate "A code reviewer flagged: ${COMMENT_TEXT}. Evaluate independently."
-```
-
-## ELSE
-
-## Step 23: Fix Real Issue
+## Step 23: Re-run Local Review After Fixes
Tool: csa
Tier: tier-2-standard
OnFail: retry 2
-Fix the real issue identified by the bot. Commit the fix.
-
-## ENDIF
-
-## ENDFOR
+Run `csa review --diff` to validate fixes before re-triggering cloud review.
## Step 24: Push Fixes and Re-trigger Review
@@ -412,33 +510,169 @@ Push all fix commits and trigger a new round of codex review.
```bash
set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push." >&2
+ exit 1
+fi
git push origin "${BRANCH}"
-gh pr comment "${PR_NUM}" --repo "${REPO}" --body "@codex review"
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+COMMENT_URL="$(gh pr comment "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --body "@codex review")"
SELF_LOGIN=$(gh api user -q '.login')
-COMMENTS_PAYLOAD=$(gh pr view "${PR_NUM}" --repo "${REPO}" --json comments)
-TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.comments[]? | select(.author.login == $me and .body == "@codex review")] | sort_by(.createdAt) | last | .createdAt // empty')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_LOCAL}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
if [ -z "${TRIGGER_TS}" ]; then
TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
fi
-printf 'PR_NUM=%s\nTRIGGER_TS=%s\n' "${PR_NUM}" "${TRIGGER_TS}"
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM_LOCAL}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```
+
+## Step 25: Poll Re-triggered Bot Response
+
+Tool: bash
+OnFail: abort
+
+After posting the second `@codex review`, poll again with bounded timeout.
+Output `1` when findings remain; output empty string when clean.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(gh pr view --json number -q '.number')"
+fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for re-triggered bot response." >&2
+exit 1
+```
+
+## IF ${STEP_25_OUTPUT}
+
+## Step 26: Stop on Remaining Bot Findings
+
+Tool: bash
+OnFail: abort
+
+Abort merge when re-triggered bot review still reports findings.
+
+```bash
+echo "ERROR: Bot review still has findings after re-trigger. Do not merge." >&2
+exit 1
```
## ELSE
-## Step 25: Bot Review Clean
+## Step 27: Merge PR After Re-review Clean
-No issues found by the codex bot. Proceed to merge.
+Tool: bash
+OnFail: abort
+
+Squash-merge the PR after the second bot review returns clean, then update local main.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```
## ENDIF
-## Step 26: Merge PR
+## ELSE
+
+## Step 28: Merge PR (Initial Review Clean)
Tool: bash
OnFail: abort
-Squash-merge the PR and update local main.
+No issues were found in the initial bot review. Merge using the PR number from step output.
```bash
-gh pr merge "${PR_NUM}" --repo "${REPO}" --squash --delete-branch
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
git checkout main && git pull origin main
```
+
+## ENDIF
diff --git a/patterns/dev-to-merge/skills/dev-to-merge/SKILL.md b/patterns/dev-to-merge/skills/dev-to-merge/SKILL.md
index 65755829..18c2133d 100644
--- a/patterns/dev-to-merge/skills/dev-to-merge/SKILL.md
+++ b/patterns/dev-to-merge/skills/dev-to-merge/SKILL.md
@@ -1,6 +1,6 @@
---
name: dev-to-merge
-description: Full development cycle from branch creation through commit, PR, codex-bot review, and merge
+description: Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge
allowed-tools: Bash, Read, Grep, Glob, Edit, Write
triggers:
- "dev-to-merge"
@@ -20,7 +20,7 @@ Treat the run as executor ONLY when initial prompt contains:
**YOU ARE THE EXECUTOR.** Follow these rules:
1. **SKIP the "Execution Protocol" section below** -- it is for the orchestrator, not you.
2. **Read the pattern** at `patterns/dev-to-merge/PATTERN.md` and follow it step by step.
-3. **ABSOLUTE PROHIBITION**: Do NOT run `csa run`, `csa review`, `csa debate`, or ANY `csa` command. You must perform the work DIRECTLY. Running any `csa` command causes infinite recursion.
+3. **RECURSION GUARD**: Do NOT run `csa run --skill dev2merge` or `csa run --skill dev-to-merge` from inside this skill. Other `csa` commands required by the workflow (for example `csa run --skill mktd`, `csa review`, `csa debate`) are allowed.
**Only if you are the main agent (Claude Code / human user)**:
- You are the **orchestrator**. Follow the "Execution Protocol" steps below.
@@ -29,7 +29,7 @@ Treat the run as executor ONLY when initial prompt contains:
## Purpose
-Execute the complete development lifecycle on a feature branch: format, lint, test, stage, security scan, security audit, heterogeneous code review, commit with Conventional Commits, push, create PR, trigger cloud codex-bot review, handle false-positive arbitration via debate, fix-and-retrigger loops, and final squash-merge to main. This is the "everything in one command" workflow that composes `commit`, `security-audit`, `ai-reviewed-commit`, and `pr-codex-bot` into a single end-to-end pipeline.
+Execute the complete development lifecycle on a feature branch: mandatory mktd planning (with internal debate), format, lint, test, stage, security scan, security audit, heterogeneous code review, commit with Conventional Commits, push, create PR, trigger cloud codex-bot review, handle false-positive arbitration via debate, fix-and-retrigger loops, and final squash-merge to main. This is the "everything in one command" workflow that composes `mktd`, `commit`, `security-audit`, `ai-reviewed-commit`, and `pr-codex-bot` into a single end-to-end pipeline.
## Execution Protocol (ORCHESTRATOR ONLY)
@@ -50,23 +50,24 @@ csa run --skill dev-to-merge "Implement, review, and merge "
### Step-by-Step
1. **Validate branch**: Verify on feature branch, not main/dev. Abort if protected.
-2. **Quality gates**: Run `just fmt`, `just clippy`, `just test` sequentially.
-3. **Stage changes**: `git add -A`, then unstage incidental lockfiles unless scope indicates release/dependency updates.
-4. **Security scan**: Grep staged files for hardcoded secrets.
-5. **Security audit**: Run `security-audit` via bounded bash wrapper with timeout and required `SECURITY_AUDIT_VERDICT`.
-6. **Pre-commit review**: Run `csa review --diff` (heterogeneous reviewer). Fix issues up to 3 rounds.
-7. **Re-run quality gates**: `just pre-commit` after any fixes.
-8. **Generate commit message**: Delegate to CSA (tier-1) for Conventional Commits.
-9. **Commit**: `git commit -m "${COMMIT_MSG}"`.
-10. **Version gate precheck**: auto-run `just check-version-bumped`; if needed, `just bump-patch` and create a dedicated release commit before pre-PR review/push.
-11. **Pre-PR cumulative review**: `csa review --range main...HEAD` (covers full branch, NOT just last commit). MUST pass before push.
-12. **Push**: `git push -u origin ${BRANCH}`.
-13. **Create PR**: `gh pr create --base main`.
-14. **Trigger codex bot**: post `@codex review` and capture trigger timestamp.
-15. **Poll and evaluate**: wait for bot comments/reviews newer than trigger timestamp.
-16. **Arbitrate false positives**: Use `csa debate` with independent model.
-17. **Fix real issues**: Commit fixes, push, re-trigger bot (max 10 iterations).
-18. **Merge**: `gh pr merge --squash --delete-branch`, update local main.
+2. **Plan first (mktd)**: Run `csa run --skill mktd` and require a saved TODO for current branch (checkbox tasks + `DONE WHEN`). This guarantees mktd's built-in debate phase executed.
+3. **Quality gates**: Run `just fmt`, `just clippy`, `just test` sequentially.
+4. **Stage changes**: `git add -A`, then unstage incidental lockfiles unless scope indicates release/dependency updates.
+5. **Security scan**: Grep staged files for hardcoded secrets.
+6. **Security audit**: Run `security-audit` via bounded bash wrapper with timeout and required `SECURITY_AUDIT_VERDICT`.
+7. **Pre-commit review**: Run `csa review --diff` (heterogeneous reviewer). Fix issues up to 3 rounds.
+8. **Re-run quality gates**: `just pre-commit` after any fixes.
+9. **Generate commit message**: Delegate to CSA (tier-1) for Conventional Commits.
+10. **Commit**: `git commit -m "${COMMIT_MSG}"`.
+11. **Version gate precheck**: auto-run `just check-version-bumped`; if needed, `just bump-patch` and create a dedicated release commit before pre-PR review/push.
+12. **Pre-PR cumulative review**: `csa review --range main...HEAD` (covers full branch, NOT just last commit). MUST pass before push.
+13. **Push**: `git push -u origin ${BRANCH}`.
+14. **Create PR**: `gh pr create --base main`.
+15. **Trigger codex bot**: post `@codex review` and capture trigger timestamp.
+16. **Poll and evaluate**: wait for bot comments/reviews newer than trigger timestamp.
+17. **Arbitrate false positives**: Use `csa debate` with independent model.
+18. **Fix real issues**: Commit fixes, push, re-trigger bot (max 10 iterations).
+19. **Merge**: `gh pr merge --squash --delete-branch`, update local main.
## Example Usage
@@ -74,24 +75,26 @@ csa run --skill dev-to-merge "Implement, review, and merge "
|---------|--------|
| `/dev-to-merge scope=executor` | Full cycle for executor module changes |
| `/dev-to-merge` | Full cycle for all current changes |
+| `/dev2merge` | Preferred new command (same workflow behavior) |
## Integration
-- **Composes**: `security-audit`, `ai-reviewed-commit` / `csa-review`, `commit`, `pr-codex-bot`
-- **Uses**: `debate` (for false-positive arbitration and self-authored review)
+- **Composes**: `mktd`, `security-audit`, `ai-reviewed-commit` / `csa-review`, `commit`, `pr-codex-bot`
+- **Uses**: `mktd` (mandatory planning + debate evidence), `debate` (false-positive arbitration and self-authored review)
- **Standalone**: Complete workflow -- does not need other skills to be invoked separately
## Done Criteria
1. Feature branch validated (not main/dev).
-2. `just fmt`, `just clippy`, `just test` all exit 0.
-3. Security scan found no hardcoded secrets.
-4. Security audit returned PASS or PASS_DEFERRED.
-5. Pre-commit review completed with zero unresolved P0/P1 issues.
-6. Commit created with Conventional Commits format.
-7. PR created on GitHub targeting main.
-8. Cloud codex bot triggered and response handled.
-9. All bot comments classified and actioned (fixed, arbitrated, or acknowledged).
-10. PR merged via squash-merge.
-11. Local main updated: `git checkout main && git pull origin main`.
-12. Feature branch deleted (remote and local).
+2. mktd plan completed and a branch TODO was saved (`DONE WHEN` present).
+3. `just fmt`, `just clippy`, `just test` all exit 0.
+4. Security scan found no hardcoded secrets.
+5. Security audit returned PASS or PASS_DEFERRED.
+6. Pre-commit review completed with zero unresolved P0/P1 issues.
+7. Commit created with Conventional Commits format.
+8. PR created on GitHub targeting main.
+9. Cloud codex bot triggered and response handled.
+10. All bot comments classified and actioned (fixed, arbitrated, or acknowledged).
+11. PR merged via squash-merge.
+12. Local main updated: `git checkout main && git pull origin main`.
+13. Feature branch deleted (remote and local).
diff --git a/patterns/dev-to-merge/workflow.toml b/patterns/dev-to-merge/workflow.toml
index 3cc8660a..b15959f8 100644
--- a/patterns/dev-to-merge/workflow.toml
+++ b/patterns/dev-to-merge/workflow.toml
@@ -1,43 +1,106 @@
[workflow]
name = "dev-to-merge"
-description = "Full development cycle from branch creation through commit, PR, codex-bot review, and merge"
+description = "Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge"
[[workflow.variables]]
-name = "BOT_COMMENTS"
+name = "AUDIT_OUTPUT"
[[workflow.variables]]
-name = "BOT_HAS_ISSUES"
+name = "AUDIT_PROMPT"
+
+[[workflow.variables]]
+name = "AUDIT_STATUS"
+
+[[workflow.variables]]
+name = "BOT_INLINE_COMMENTS"
+
+[[workflow.variables]]
+name = "BOT_PR_COMMENTS"
+
+[[workflow.variables]]
+name = "BOT_PR_FINDINGS"
+
+[[workflow.variables]]
+name = "BOT_REVIEWS"
+
+[[workflow.variables]]
+name = "BOT_REVIEW_FINDINGS"
+
+[[workflow.variables]]
+name = "BOT_TRIGGER_REACTIONS"
[[workflow.variables]]
name = "BRANCH"
[[workflow.variables]]
-name = "COMMENT_IS_FALSE_POSITIVE"
+name = "COMMENTS_PAYLOAD"
+
+[[workflow.variables]]
+name = "COMMENT_URL"
+
+[[workflow.variables]]
+name = "COMMIT_MSG_LOCAL"
[[workflow.variables]]
-name = "COMMENT_TEXT"
+name = "ELAPSED"
[[workflow.variables]]
-name = "COMMIT_MSG"
+name = "MATCHED_LOCKFILES"
[[workflow.variables]]
-name = "PR_BODY"
+name = "ORIGIN_URL"
+
+[[workflow.variables]]
+name = "PRE_DIRTY_CARGO_LOCK"
+
+[[workflow.variables]]
+name = "PR_BODY_LOCAL"
[[workflow.variables]]
name = "PR_NUM"
[[workflow.variables]]
-name = "REPO"
+name = "PR_NUM_FROM_STEP"
[[workflow.variables]]
-name = "REVIEW_HAS_ISSUES"
+name = "PR_NUM_LOCAL"
+
+[[workflow.variables]]
+name = "REPO_LOCAL"
[[workflow.variables]]
-name = "CUMULATIVE_REVIEW_COMPLETED"
+name = "REVIEW_HAS_ISSUES"
[[workflow.variables]]
name = "SCOPE"
+[[workflow.variables]]
+name = "SELF_LOGIN"
+
+[[workflow.variables]]
+name = "STAGED_FILES"
+
+[[workflow.variables]]
+name = "STEP_19_OUTPUT"
+
+[[workflow.variables]]
+name = "STEP_25_OUTPUT"
+
+[[workflow.variables]]
+name = "TRIGGER_COMMENT_ID"
+
+[[workflow.variables]]
+name = "TRIGGER_TS"
+
+[[workflow.variables]]
+name = "VERDICT"
+
+[[workflow.variables]]
+name = "VERSION"
+
+[[workflow.variables]]
+name = "lockpath"
+
[[workflow.steps]]
id = 1
title = "Validate Branch"
@@ -47,7 +110,11 @@ Verify the current branch is a feature branch, not a protected branch.
If on main or dev, abort immediately.
```bash
-BRANCH="${BRANCH}"
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch." >&2
+ exit 1
+fi
DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@')
if [ -z "$DEFAULT_BRANCH" ]; then DEFAULT_BRANCH="main"; fi
if [ "$BRANCH" = "$DEFAULT_BRANCH" ] || [ "$BRANCH" = "dev" ]; then
@@ -59,6 +126,22 @@ on_fail = "abort"
[[workflow.steps]]
id = 2
+title = "Step 1.5: Plan with mktd (Debate Required)"
+tool = "bash"
+prompt = """
+Generate or refresh a branch TODO plan through `mktd` before development gates.
+This step MUST pass through mktd's built-in debate phase and save a TODO."""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 3
+title = "Include mktd"
+tool = "weave"
+prompt = "mktd"
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 4
title = "Run Formatters"
tool = "bash"
prompt = """
@@ -72,7 +155,7 @@ just fmt
retry = 2
[[workflow.steps]]
-id = 3
+id = 5
title = "Run Linters"
tool = "bash"
prompt = """
@@ -86,7 +169,7 @@ just clippy
retry = 2
[[workflow.steps]]
-id = 4
+id = 6
title = "Run Tests"
tool = "bash"
prompt = """
@@ -98,10 +181,10 @@ just test
on_fail = "abort"
[[workflow.steps]]
-id = 5
+id = 7
title = "Stage Changes"
tool = "bash"
-prompt = """
+prompt = '''
Stage all modified and new files relevant to ${SCOPE}.
Verify no untracked files remain.
@@ -109,12 +192,12 @@ Verify no untracked files remain.
git add -A
if ! printf '%s' "${SCOPE:-}" | grep -Eqi 'release|version|lock|deps|dependency'; then
STAGED_FILES="$(git diff --cached --name-only)"
- if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo\.toml$|(^|/)package\.json$|(^|/)pnpm-workspace\.yaml$|(^|/)go\.mod$'; then
+ if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo[.]toml$|(^|/)package[.]json$|(^|/)pnpm-workspace[.]yaml$|(^|/)go[.]mod$'; then
echo "INFO: Dependency manifest change detected; preserving staged lockfiles."
- elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo\.lock|package-lock\.json|pnpm-lock\.yaml|yarn\.lock|go\.sum)$' | grep -q .; then
+ elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' | grep -q .; then
echo "INFO: Lockfile-only staged change detected; preserving staged lockfiles."
else
- MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | awk '$0 ~ /(^|\/)(Cargo\.lock|package-lock\.json|pnpm-lock\.yaml|yarn\.lock|go\.sum)$/ { print }')"
+ MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | grep -E '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' || true)"
if [ -n "${MATCHED_LOCKFILES}" ]; then
printf '%s\n' "${MATCHED_LOCKFILES}" | while read -r lockpath; do
echo "INFO: Unstaging incidental lockfile change: ${lockpath}"
@@ -132,11 +215,11 @@ if git ls-files --others --exclude-standard | grep -q .; then
git ls-files --others --exclude-standard
exit 1
fi
-```"""
+```'''
on_fail = "abort"
[[workflow.steps]]
-id = 6
+id = 8
title = "Security Scan"
tool = "bash"
prompt = """
@@ -154,10 +237,10 @@ done
on_fail = "abort"
[[workflow.steps]]
-id = 7
+id = 9
title = "Security Audit"
tool = "bash"
-prompt = """
+prompt = '''
Run the security-audit skill: test completeness check, vulnerability scan,
and code quality check. The audit MUST pass before commit.
@@ -186,7 +269,7 @@ if [ "${AUDIT_STATUS}" -ne 0 ]; then
echo "ERROR: security-audit command failed (exit=${AUDIT_STATUS})." >&2
exit 1
fi
-VERDICT="$(printf '%s\n' "${AUDIT_OUTPUT}" | sed -nE 's/^SECURITY_AUDIT_VERDICT:[[:space:]]*(PASS_DEFERRED|PASS|FAIL)$/\\1/p' | tail -n1)"
+VERDICT="$(printf '%s\n' "${AUDIT_OUTPUT}" | sed -nE 's/^SECURITY_AUDIT_VERDICT:[[:space:]]*(PASS_DEFERRED|PASS|FAIL)$/\1/p' | tail -n1)"
if [ -z "${VERDICT}" ]; then
echo "ERROR: Missing SECURITY_AUDIT_VERDICT marker in audit output." >&2
exit 1
@@ -196,22 +279,27 @@ if [ "${VERDICT}" = "FAIL" ]; then
exit 1
fi
echo "SECURITY_AUDIT_VERDICT=${VERDICT}"
-```"""
+```'''
on_fail = "abort"
[[workflow.steps]]
-id = 8
+id = 10
title = "Pre-Commit Review"
tool = "csa"
prompt = """
Run heterogeneous code review on all uncommitted changes versus HEAD.
The reviewer MUST be a different model family than the code author.
+
+```bash
+csa review --diff
+```
+
Review output includes AGENTS.md compliance checklist."""
tier = "tier-2-standard"
on_fail = "abort"
[[workflow.steps]]
-id = 9
+id = 11
title = "Fix Review Issues"
tool = "csa"
prompt = """
@@ -224,7 +312,7 @@ condition = "${REVIEW_HAS_ISSUES}"
retry = 3
[[workflow.steps]]
-id = 10
+id = 12
title = "Re-run Quality Gates"
tool = "bash"
prompt = """
@@ -237,18 +325,18 @@ on_fail = "abort"
condition = "${REVIEW_HAS_ISSUES}"
[[workflow.steps]]
-id = 11
+id = 13
title = "Re-review"
tool = "csa"
prompt = """
-Re-review all changes to verify all issues are resolved.
+Run `csa review --diff` again to verify all issues are resolved.
Loop back to Step 9 if issues persist (max 3 rounds)."""
tier = "tier-2-standard"
on_fail = "abort"
condition = "${REVIEW_HAS_ISSUES}"
[[workflow.steps]]
-id = 12
+id = 14
title = "Generate Commit Message"
tool = "bash"
prompt = """
@@ -260,19 +348,24 @@ scripts/gen_commit_msg.sh "${SCOPE:-}"
on_fail = "abort"
[[workflow.steps]]
-id = 13
+id = 15
title = "Commit"
tool = "bash"
prompt = """
-Create the commit using the generated message: ${COMMIT_MSG}.
+Create the commit using the generated message from Step 12.
```bash
-git commit -m "${COMMIT_MSG}"
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: Commit message is empty. Step 12 must output a commit message." >&2
+ exit 1
+fi
+git commit -m "${COMMIT_MSG_LOCAL}"
```"""
on_fail = "abort"
[[workflow.steps]]
-id = 14
+id = 16
title = "Ensure Version Bumped"
tool = "bash"
prompt = """
@@ -308,12 +401,12 @@ git commit -m "chore(release): bump workspace version to ${VERSION}"
on_fail = "abort"
[[workflow.steps]]
-id = 15
+id = 17
title = "Pre-PR Cumulative Review"
tool = "csa"
prompt = """
Run a cumulative review covering ALL commits on the feature branch since main.
-This is distinct from Step 8's per-commit review (csa review --diff):
+This is distinct from Step 8's per-commit review (`csa review --diff`):
- Step 8 reviews uncommitted changes (staged diff) — single-commit granularity.
- This step reviews the full feature branch — catches cross-commit issues.
@@ -327,13 +420,18 @@ tier = "tier-2-standard"
on_fail = "abort"
[[workflow.steps]]
-id = 16
+id = 18
title = "Push to Origin"
tool = "bash"
prompt = """
Push the feature branch to the remote origin.
```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
git push -u origin "${BRANCH}"
```"""
@@ -341,175 +439,350 @@ git push -u origin "${BRANCH}"
retry = 2
[[workflow.steps]]
-id = 17
+id = 19
title = "Create Pull Request"
tool = "bash"
-prompt = """
+prompt = '''
Create a PR targeting main via GitHub CLI. The PR body includes a summary
of changes for ${SCOPE} and a test plan checklist covering tests, linting,
security audit, and codex review.
```bash
-gh pr create --base main --repo "${REPO}" --title "${COMMIT_MSG}" --body "${PR_BODY}"
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: PR title is empty. Step 12 output is required." >&2
+ exit 1
+fi
+PR_BODY_LOCAL="${PR_BODY:-## Summary
+- Scope: ${SCOPE:-unspecified}'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 20
+title = "Validation"
+prompt = """
+- just fmt
+- just clippy
+- just test
+- csa review --range main...HEAD
+}"
+gh pr create --base main --repo "${REPO_LOCAL}" --title "${COMMIT_MSG_LOCAL}" --body "${PR_BODY_LOCAL}"
```"""
on_fail = "abort"
[[workflow.steps]]
-id = 18
+id = 21
title = "Trigger Codex Bot Review"
tool = "bash"
-prompt = """
+prompt = '''
Trigger the cloud codex review bot on the newly created PR.
Capture the PR number for polling.
```bash
set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
PR_NUM=$(gh pr view --json number -q '.number')
-gh pr comment "${PR_NUM}" --repo "${REPO}" --body "@codex review"
+COMMENT_URL="$(gh pr comment "${PR_NUM}" --repo "${REPO_LOCAL}" --body "@codex review")"
SELF_LOGIN=$(gh api user -q '.login')
-COMMENTS_PAYLOAD=$(gh pr view "${PR_NUM}" --repo "${REPO}" --json comments)
-TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.comments[]? | select(.author.login == $me and .body == "@codex review")] | sort_by(.createdAt) | last | .createdAt // empty')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
if [ -z "${TRIGGER_TS}" ]; then
TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
fi
-printf 'PR_NUM=%s\nTRIGGER_TS=%s\n' "${PR_NUM}" "${TRIGGER_TS}"
-```"""
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```'''
on_fail = "abort"
[[workflow.steps]]
-id = 19
+id = 22
title = "Poll for Bot Response"
tool = "bash"
-prompt = """
-Poll for bot review response with a bounded timeout (max 10 minutes).
-If the bot does not respond, fall through to UNAVAILABLE handling.
+prompt = '''
+Poll for bot review response with a bounded timeout (max 20 minutes).
+Output `1` when bot findings are present; output empty string otherwise.
```bash
-TIMEOUT=600; INTERVAL=30; ELAPSED=0
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
TRIGGER_TS="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
if [ -z "${PR_NUM_FROM_STEP}" ]; then PR_NUM_FROM_STEP="${PR_NUM}"; fi
if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
- PAYLOAD=$(gh pr view "${PR_NUM_FROM_STEP}" --repo "${REPO}" --json comments,reviews)
- BOT_COMMENTS=$(printf '%s' "${PAYLOAD}" | jq -r --arg ts "${TRIGGER_TS}" '[.comments[]? | select(.createdAt >= $ts and (.author.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
- BOT_REVIEWS=$(printf '%s' "${PAYLOAD}" | jq -r --arg ts "${TRIGGER_TS}" '[.reviews[]? | select(.submittedAt >= $ts and (.author.login | ascii_downcase | test("codex|bot|connector")))] | length')
- if [ "${BOT_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ]; then
- echo "Bot response received."
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
exit 0
fi
sleep "$INTERVAL"
ELAPSED=$((ELAPSED + INTERVAL))
done
-echo "Bot did not respond within timeout."
+echo "ERROR: Timed out waiting for bot response." >&2
exit 1
-```"""
-on_fail = "skip"
+```'''
+on_fail = "abort"
[[workflow.steps]]
-id = 20
+id = 23
title = "Evaluate Bot Comments"
tool = "csa"
prompt = """
-For each bot comment, classify as:
-- Category A (already fixed): react and acknowledge
-- Category B (suspected false positive): queue for arbitration
-- Category C (real issue): react and queue for fix"""
+Evaluate all inline bot findings on the PR and produce a consolidated action plan.
+List suspected false positives and real defects separately."""
tier = "tier-2-standard"
on_fail = "abort"
-condition = "${BOT_HAS_ISSUES}"
+condition = "${STEP_19_OUTPUT}"
[[workflow.steps]]
-id = 21
-title = "Process Comment"
+id = 24
+title = "Arbitrate Disputed Findings"
tool = "csa"
prompt = """
-Evaluate this specific bot comment against the current code state.
-Determine category (A/B/C) and take appropriate action."""
+For disputed findings, run independent arbitration using `csa debate` and
+produce a verdict for each disputed item."""
on_fail = "abort"
-condition = "${BOT_HAS_ISSUES}"
-
-[workflow.steps.loop_var]
-variable = "comment"
-collection = "${BOT_COMMENTS}"
+condition = "${STEP_19_OUTPUT}"
[[workflow.steps]]
-id = 22
-title = "Arbitrate False Positive"
+id = 25
+title = "Fix Confirmed Issues"
tool = "csa"
prompt = """
-Evaluate the following code reviewer finding independently.
-The arbiter MUST be a different model family.
-
-A code reviewer flagged: ${COMMENT_TEXT}. Evaluate independently.
-Is this a real issue or a false positive? Provide reasoning."""
+Implement fixes for confirmed bot findings and create commit(s) with clear
+messages. Do not modify unrelated files."""
tier = "tier-2-standard"
on_fail = "abort"
-condition = "${BOT_HAS_ISSUES}"
-
-[workflow.steps.loop_var]
-variable = "comment"
-collection = "${BOT_COMMENTS}"
+condition = "${STEP_19_OUTPUT}"
[[workflow.steps]]
-id = 23
-title = "Fix Real Issue"
+id = 26
+title = "Re-run Local Review After Fixes"
tool = "csa"
-prompt = "Fix the real issue identified by the bot. Commit the fix."
+prompt = "Run `csa review --diff` to validate fixes before re-triggering cloud review."
tier = "tier-2-standard"
-condition = "${BOT_HAS_ISSUES}"
+condition = "${STEP_19_OUTPUT}"
[workflow.steps.on_fail]
retry = 2
-[workflow.steps.loop_var]
-variable = "comment"
-collection = "${BOT_COMMENTS}"
-
[[workflow.steps]]
-id = 24
+id = 27
title = "Push Fixes and Re-trigger Review"
tool = "bash"
-prompt = """
+prompt = '''
Push all fix commits and trigger a new round of codex review.
```bash
set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
git push origin "${BRANCH}"
-gh pr comment "${PR_NUM}" --repo "${REPO}" --body "@codex review"
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+COMMENT_URL="$(gh pr comment "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --body "@codex review")"
SELF_LOGIN=$(gh api user -q '.login')
-COMMENTS_PAYLOAD=$(gh pr view "${PR_NUM}" --repo "${REPO}" --json comments)
-TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.comments[]? | select(.author.login == $me and .body == "@codex review")] | sort_by(.createdAt) | last | .createdAt // empty')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_LOCAL}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
if [ -z "${TRIGGER_TS}" ]; then
TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
fi
-printf 'PR_NUM=%s\nTRIGGER_TS=%s\n' "${PR_NUM}" "${TRIGGER_TS}"
-```"""
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM_LOCAL}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```'''
on_fail = "abort"
-condition = "${BOT_HAS_ISSUES}"
+condition = "${STEP_19_OUTPUT}"
[[workflow.steps]]
-id = 25
-title = "Bot Review Clean"
+id = 28
+title = "Poll Re-triggered Bot Response"
+tool = "bash"
+prompt = '''
+After posting the second `@codex review`, poll again with bounded timeout.
+Output `1` when findings remain; output empty string when clean.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(gh pr view --json number -q '.number')"
+fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for re-triggered bot response." >&2
+exit 1
+```'''
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 29
+title = "Stop on Remaining Bot Findings"
tool = "bash"
prompt = """
-No issues found by the codex bot. Proceed to merge.
+Abort merge when re-triggered bot review still reports findings.
```bash
-echo "Bot review clean — no issues found. Proceeding to merge."
+echo "ERROR: Bot review still has findings after re-trigger. Do not merge." >&2
+exit 1
```"""
on_fail = "abort"
-condition = "(!(${BOT_HAS_ISSUES}))"
+condition = "(${STEP_19_OUTPUT}) && (${STEP_25_OUTPUT})"
[[workflow.steps]]
-id = 26
-title = "Merge PR"
+id = 30
+title = "Merge PR After Re-review Clean"
tool = "bash"
-prompt = """
-Squash-merge the PR and update local main.
+prompt = '''
+Squash-merge the PR after the second bot review returns clean, then update local main.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```'''
+on_fail = "abort"
+condition = "(${STEP_19_OUTPUT}) && (!(${STEP_25_OUTPUT}))"
+
+[[workflow.steps]]
+id = 31
+title = "Merge PR (Initial Review Clean)"
+tool = "bash"
+prompt = '''
+No issues were found in the initial bot review. Merge using the PR number from step output.
```bash
-gh pr merge "${PR_NUM}" --repo "${REPO}" --squash --delete-branch
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
git checkout main && git pull origin main
-```"""
+```'''
on_fail = "abort"
+condition = "!(${STEP_19_OUTPUT})"
diff --git a/patterns/dev2merge/.skill.toml b/patterns/dev2merge/.skill.toml
new file mode 100644
index 00000000..ef8c8e00
--- /dev/null
+++ b/patterns/dev2merge/.skill.toml
@@ -0,0 +1,8 @@
+[skill]
+name = "dev2merge"
+version = "0.1.0"
+
+[agent]
+tier = "tier-3-complex"
+max_turns = 50
+tools = [{ tool = "claude-code" }]
diff --git a/patterns/dev2merge/FINDINGS.md b/patterns/dev2merge/FINDINGS.md
new file mode 100644
index 00000000..f65f73d1
--- /dev/null
+++ b/patterns/dev2merge/FINDINGS.md
@@ -0,0 +1,49 @@
+# Dev2Merge Pattern: Compilation Findings
+
+## Summary
+
+The current `dev2merge` workflow compiles successfully and implements a
+27-step end-to-end branch-to-merge pipeline with mandatory planning via
+`mktd` (including mktd-internal debate), quality gates, local/cumulative
+review, cloud codex review loop, and final merge.
+
+## Current Workflow Shape
+
+1. Validate branch safety (no direct work on protected branch).
+2. Enforce planning gate through `mktd`, then verify TODO artifacts
+ (checkbox tasks + `DONE WHEN`).
+3. Run `just fmt`, `just clippy`, and `just test`.
+4. Stage changes with lockfile-aware guardrails.
+5. Run security scan + `security-audit` gate.
+6. Run local review (`csa review --diff`) and fix loop when needed.
+7. Generate commit message and commit.
+8. Push branch, create PR, and trigger cloud codex review.
+9. Poll review response (inline comments + PR comments + reviews).
+10. If findings exist: evaluate, arbitrate disputed items via debate,
+ fix, rerun local review, push, retrigger bot.
+11. If clean: merge PR.
+
+## Key Improvements Captured
+
+- Added mandatory mktd planning gate before development gates.
+- Migrated review handling from per-comment loop to consolidated analysis
+ steps (better context and lower orchestration complexity).
+- Hardened repository resolution with `gh repo view` primary path and
+ remote URL fallback, including `.git` suffix normalization.
+- Added top-level PR comments polling (in addition to inline comments and
+ reviews) to reduce missed bot findings.
+- Added explicit branch detection guards before push operations.
+
+## Known Tradeoffs
+
+- `REPO_LOCAL` resolution block is intentionally duplicated across multiple
+ bash steps for robustness and local step self-sufficiency.
+- Bot identity detection currently uses a heuristic login regex
+ (`codex|bot|connector`) and may require updates if provider naming changes.
+
+## Validation Snapshot
+
+- `weave compile` succeeds for `patterns/dev2merge/PATTERN.md`.
+- Local gates expected by this pattern (`fmt`, `clippy`, `test`, review)
+ are runnable and integrated.
+- Pattern and workflow definitions are synchronized for current behavior.
diff --git a/patterns/dev2merge/PATTERN.md b/patterns/dev2merge/PATTERN.md
new file mode 100644
index 00000000..6dfa5a04
--- /dev/null
+++ b/patterns/dev2merge/PATTERN.md
@@ -0,0 +1,678 @@
+---
+name = "dev2merge"
+description = "Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge"
+allowed-tools = "Bash, Read, Edit, Write, Grep, Glob, Task"
+tier = "tier-3-complex"
+version = "0.1.0"
+---
+
+# Dev2Merge Workflow
+
+End-to-end development workflow: implement code on a feature branch, pass all
+quality gates, commit with Conventional Commits, create a PR, run codex-bot
+review loop, and merge to main. Planning is mandatory via `mktd`, and `mktd`
+internally requires adversarial `debate` evidence.
+
+## Step 1: Validate Branch
+
+Tool: bash
+OnFail: abort
+
+Verify the current branch is a feature branch, not a protected branch.
+If on main or dev, abort immediately.
+
+```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch."
+ exit 1
+fi
+DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@')
+if [ -z "$DEFAULT_BRANCH" ]; then DEFAULT_BRANCH="main"; fi
+if [ "$BRANCH" = "$DEFAULT_BRANCH" ] || [ "$BRANCH" = "dev" ]; then
+ echo "ERROR: Cannot work directly on $BRANCH. Create a feature branch."
+ exit 1
+fi
+```
+
+## Step 1.5: Plan with mktd (Debate Required)
+
+Tool: bash
+OnFail: abort
+
+Generate or refresh a branch TODO plan through `mktd` before development gates.
+This step MUST pass through mktd's built-in debate phase and save a TODO.
+
+## INCLUDE mktd
+
+```bash
+set -euo pipefail
+CURRENT_BRANCH="$(git branch --show-current)"
+FEATURE_INPUT="${SCOPE:-current branch changes pending merge}"
+MKTD_PROMPT="Plan dev2merge execution for branch ${CURRENT_BRANCH}. Scope: ${FEATURE_INPUT}. Must execute full mktd workflow and save TODO."
+set +e
+MKTD_OUTPUT="$(csa run --skill mktd "${MKTD_PROMPT}" 2>&1)"
+MKTD_STATUS=$?
+set -e
+printf '%s\n' "${MKTD_OUTPUT}"
+if [ "${MKTD_STATUS}" -ne 0 ]; then
+ echo "ERROR: mktd failed (exit=${MKTD_STATUS})." >&2
+ exit 1
+fi
+LATEST_TS="$(csa todo list --format json | jq -r --arg br "${CURRENT_BRANCH}" '[.[] | select(.branch == $br)] | sort_by(.timestamp) | last | .timestamp // empty')"
+if [ -z "${LATEST_TS}" ]; then
+ echo "ERROR: mktd did not produce a TODO for branch ${CURRENT_BRANCH}." >&2
+ exit 1
+fi
+TODO_PATH="$(csa todo show -t "${LATEST_TS}" --path)"
+if [ ! -s "${TODO_PATH}" ]; then
+ echo "ERROR: TODO file is empty: ${TODO_PATH}" >&2
+ exit 1
+fi
+grep -qF -- '- [ ] ' "${TODO_PATH}" || { echo "ERROR: TODO missing checkbox tasks: ${TODO_PATH}" >&2; exit 1; }
+grep -q 'DONE WHEN:' "${TODO_PATH}" || { echo "ERROR: TODO missing DONE WHEN clauses: ${TODO_PATH}" >&2; exit 1; }
+printf 'MKTD_TODO_TIMESTAMP=%s\nMKTD_TODO_PATH=%s\n' "${LATEST_TS}" "${TODO_PATH}"
+```
+
+## Step 2: Run Formatters
+
+Tool: bash
+OnFail: retry 2
+
+Run the project formatter to ensure consistent code style.
+
+```bash
+just fmt
+```
+
+## Step 3: Run Linters
+
+Tool: bash
+OnFail: retry 2
+
+Run linters to catch static analysis issues.
+
+```bash
+just clippy
+```
+
+## Step 4: Run Tests
+
+Tool: bash
+OnFail: abort
+
+Run the full test suite. All tests must pass before proceeding.
+
+```bash
+just test
+```
+
+## Step 5: Stage Changes
+
+Tool: bash
+
+Stage all modified and new files relevant to ${SCOPE}.
+Verify no untracked files remain.
+
+```bash
+git add -A
+if ! printf '%s' "${SCOPE:-}" | grep -Eqi 'release|version|lock|deps|dependency'; then
+ STAGED_FILES="$(git diff --cached --name-only)"
+ if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo[.]toml$|(^|/)package[.]json$|(^|/)pnpm-workspace[.]yaml$|(^|/)go[.]mod$'; then
+ echo "INFO: Dependency manifest change detected; preserving staged lockfiles."
+ elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' | grep -q .; then
+ echo "INFO: Lockfile-only staged change detected; preserving staged lockfiles."
+ else
+ MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | grep -E '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' || true)"
+ if [ -n "${MATCHED_LOCKFILES}" ]; then
+ printf '%s\n' "${MATCHED_LOCKFILES}" | while read -r lockpath; do
+ echo "INFO: Unstaging incidental lockfile change: ${lockpath}"
+ git restore --staged -- "${lockpath}"
+ done
+ fi
+ fi
+fi
+if ! git diff --cached --name-only | grep -q .; then
+ echo "ERROR: No staged files remain after scope filtering."
+ exit 1
+fi
+if git ls-files --others --exclude-standard | grep -q .; then
+ echo "ERROR: Untracked files detected."
+ git ls-files --others --exclude-standard
+ exit 1
+fi
+```
+
+## Step 6: Security Scan
+
+Tool: bash
+OnFail: abort
+
+Check for hardcoded secrets, debug statements, and commented-out code
+in staged files. Runs after staging so `git diff --cached` covers all changes.
+
+```bash
+git diff --cached --name-only | while read -r file; do
+ if grep -nE '(API_KEY|SECRET|PASSWORD|PRIVATE_KEY)=' "$file" 2>/dev/null; then
+ echo "FAIL: Potential secret in $file"
+ exit 1
+ fi
+done
+```
+
+## Step 7: Security Audit
+
+Tool: bash
+OnFail: abort
+
+Run the security-audit skill: test completeness check, vulnerability scan,
+and code quality check. The audit MUST pass before commit.
+
+Phase 1: Can you propose a test case that does not exist? If yes, FAIL.
+Phase 2: Input validation, size limits, panic risks.
+Phase 3: No debug code, secrets, or commented-out code.
+
+```bash
+AUDIT_PROMPT="Use the security-audit skill.
+Run security-audit against staged changes.
+Output a concise report and end with EXACTLY one line:
+SECURITY_AUDIT_VERDICT: PASS|PASS_DEFERRED|FAIL"
+if command -v timeout >/dev/null 2>&1; then
+ AUDIT_OUTPUT="$(timeout 1200 csa run --skill security-audit "${AUDIT_PROMPT}" 2>&1)"
+ AUDIT_STATUS=$?
+else
+ AUDIT_OUTPUT="$(csa run --skill security-audit "${AUDIT_PROMPT}" 2>&1)"
+ AUDIT_STATUS=$?
+fi
+printf '%s\n' "${AUDIT_OUTPUT}"
+if [ "${AUDIT_STATUS}" -eq 124 ]; then
+ echo "ERROR: security-audit timed out after 1200s." >&2
+ exit 1
+fi
+if [ "${AUDIT_STATUS}" -ne 0 ]; then
+ echo "ERROR: security-audit command failed (exit=${AUDIT_STATUS})." >&2
+ exit 1
+fi
+VERDICT="$(printf '%s\n' "${AUDIT_OUTPUT}" | sed -nE 's/^SECURITY_AUDIT_VERDICT:[[:space:]]*(PASS_DEFERRED|PASS|FAIL)$/\1/p' | tail -n1)"
+if [ -z "${VERDICT}" ]; then
+ echo "ERROR: Missing SECURITY_AUDIT_VERDICT marker in audit output." >&2
+ exit 1
+fi
+if [ "${VERDICT}" = "FAIL" ]; then
+ echo "ERROR: security-audit verdict is FAIL." >&2
+ exit 1
+fi
+echo "SECURITY_AUDIT_VERDICT=${VERDICT}"
+```
+
+## Step 8: Pre-Commit Review
+
+Tool: csa
+Tier: tier-2-standard
+
+Run heterogeneous code review on all uncommitted changes versus HEAD.
+The reviewer MUST be a different model family than the code author.
+
+```bash
+csa review --diff
+```
+
+Review output includes AGENTS.md compliance checklist.
+
+## IF ${REVIEW_HAS_ISSUES}
+
+## Step 9: Fix Review Issues
+
+Tool: csa
+Tier: tier-2-standard
+OnFail: retry 3
+
+Fix each issue identified by the pre-commit review.
+Preserve original code intent. Do NOT delete code to silence warnings.
+
+## Step 10: Re-run Quality Gates
+
+Tool: bash
+OnFail: abort
+
+Re-run formatters, linters, and tests after fixes.
+
+```bash
+just pre-commit
+```
+
+## Step 11: Re-review
+
+Tool: csa
+Tier: tier-2-standard
+
+Run `csa review --diff` again to verify all issues are resolved.
+Loop back to Step 9 if issues persist (max 3 rounds).
+
+## ENDIF
+
+## Step 12: Generate Commit Message
+
+Tool: bash
+OnFail: abort
+
+Generate a deterministic Conventional Commits message from staged files.
+
+```bash
+scripts/gen_commit_msg.sh "${SCOPE:-}"
+```
+
+## Step 13: Commit
+
+Tool: bash
+OnFail: abort
+
+Create the commit using the generated message from Step 12.
+
+```bash
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: Commit message is empty. Step 12 must output a commit message." >&2
+ exit 1
+fi
+git commit -m "${COMMIT_MSG_LOCAL}"
+```
+
+## Step 14: Ensure Version Bumped
+
+Tool: bash
+OnFail: abort
+
+Ensure workspace version differs from main before push gate.
+If not bumped yet, auto-bump patch and create a dedicated release commit.
+
+```bash
+set -euo pipefail
+if just check-version-bumped; then
+ echo "Version bump check passed."
+ exit 0
+fi
+PRE_DIRTY_CARGO_LOCK=0
+if git diff --name-only -- Cargo.lock | grep -q .; then
+ PRE_DIRTY_CARGO_LOCK=1
+fi
+just bump-patch
+# Use workspace weave binary to avoid stale globally-installed version drift.
+cargo run -p weave -- lock
+git add Cargo.toml weave.lock
+if [ "${PRE_DIRTY_CARGO_LOCK}" -eq 0 ] && [ -f Cargo.lock ]; then
+ git add Cargo.lock
+else
+ echo "INFO: Skipping Cargo.lock in release commit (pre-existing local edits)."
+fi
+if git diff --cached --quiet; then
+ echo "ERROR: Version bump expected changes but none were staged." >&2
+ exit 1
+fi
+VERSION="$(cargo metadata --no-deps --format-version 1 | jq -r '.packages[] | select(.name == "cli-sub-agent") | .version')"
+git commit -m "chore(release): bump workspace version to ${VERSION}"
+```
+
+## Step 15: Pre-PR Cumulative Review
+
+Tool: csa
+Tier: tier-2-standard
+OnFail: abort
+
+Run a cumulative review covering ALL commits on the feature branch since main.
+This is distinct from Step 8's per-commit review (`csa review --diff`):
+- Step 8 reviews uncommitted changes (staged diff) — single-commit granularity.
+- This step reviews the full feature branch — catches cross-commit issues.
+
+MANDATORY: This review MUST pass before pushing to origin.
+
+```bash
+csa review --range main...HEAD
+CUMULATIVE_REVIEW_COMPLETED=true
+```
+
+## Step 16: Push to Origin
+
+Tool: bash
+OnFail: retry 2
+
+Push the feature branch to the remote origin.
+
+```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
+git push -u origin "${BRANCH}"
+```
+
+## Step 17: Create Pull Request
+
+Tool: bash
+OnFail: abort
+
+Create a PR targeting main via GitHub CLI. The PR body includes a summary
+of changes for ${SCOPE} and a validation checklist covering formatting,
+linting, tests, and the cumulative branch review.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: PR title is empty. Step 12 output is required." >&2
+ exit 1
+fi
+PR_BODY_LOCAL="${PR_BODY:-## Summary
+- Scope: ${SCOPE:-unspecified}
+
+## Validation
+- just fmt
+- just clippy
+- just test
+- csa review --range main...HEAD
+}"
+gh pr create --base main --repo "${REPO_LOCAL}" --title "${COMMIT_MSG_LOCAL}" --body "${PR_BODY_LOCAL}"
+```
+
+## Step 18: Trigger Codex Bot Review
+
+Tool: bash
+
+Trigger the cloud codex review bot on the newly created PR.
+Capture the PR number for polling.
+
+```bash
+set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM=$(gh pr view --json number -q '.number')
+COMMENT_URL="$(gh pr comment "${PR_NUM}" --repo "${REPO_LOCAL}" --body "@codex review")"
+SELF_LOGIN=$(gh api user -q '.login')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
+if [ -z "${TRIGGER_TS}" ]; then
+ TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+fi
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```
+
+## Step 19: Poll for Bot Response
+
+Tool: bash
+OnFail: abort
+
+Poll for bot review response with a bounded timeout (max 20 minutes).
+Output `1` when bot findings are present; output empty string otherwise.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then PR_NUM_FROM_STEP="${PR_NUM}"; fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for bot response." >&2
+exit 1
+```
+
+## IF ${STEP_19_OUTPUT}
+
+## Step 20: Evaluate Bot Comments
+
+Tool: csa
+Tier: tier-2-standard
+
+Evaluate all inline bot findings on the PR and produce a consolidated action plan.
+List suspected false positives and real defects separately.
+
+## Step 21: Arbitrate Disputed Findings
+
+Tool: csa
+
+For disputed findings, run independent arbitration using `csa debate` and
+produce a verdict for each disputed item.
+
+## Step 22: Fix Confirmed Issues
+
+Tool: csa
+Tier: tier-2-standard
+
+Implement fixes for confirmed bot findings and create commit(s) with clear
+messages. Do not modify unrelated files.
+
+## Step 23: Re-run Local Review After Fixes
+
+Tool: csa
+Tier: tier-2-standard
+OnFail: retry 2
+
+Run `csa review --diff` to validate fixes before re-triggering cloud review.
+
+## Step 24: Push Fixes and Re-trigger Review
+
+Tool: bash
+
+Push all fix commits and trigger a new round of codex review.
+
+```bash
+set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
+git push origin "${BRANCH}"
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+COMMENT_URL="$(gh pr comment "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --body "@codex review")"
+SELF_LOGIN=$(gh api user -q '.login')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_LOCAL}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
+if [ -z "${TRIGGER_TS}" ]; then
+ TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+fi
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM_LOCAL}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```
+
+## Step 25: Poll Re-triggered Bot Response
+
+Tool: bash
+OnFail: abort
+
+After posting the second `@codex review`, poll again with bounded timeout.
+Output `1` when findings remain; output empty string when clean.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(gh pr view --json number -q '.number')"
+fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for re-triggered bot response." >&2
+exit 1
+```
+
+## IF ${STEP_25_OUTPUT}
+
+## Step 26: Stop on Remaining Bot Findings
+
+Tool: bash
+OnFail: abort
+
+Abort merge when re-triggered bot review still reports findings.
+
+```bash
+echo "ERROR: Bot review still has findings after re-trigger. Do not merge." >&2
+exit 1
+```
+
+## ELSE
+
+## Step 27: Merge PR After Re-review Clean
+
+Tool: bash
+OnFail: abort
+
+Squash-merge the PR after the second bot review returns clean, then update local main.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```
+
+## ENDIF
+
+## ELSE
+
+## Step 28: Merge PR (Initial Review Clean)
+
+Tool: bash
+OnFail: abort
+
+No issues were found in the initial bot review. Merge using the PR number from step output.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```
+
+## ENDIF
diff --git a/patterns/dev2merge/skills/dev2merge/SKILL.md b/patterns/dev2merge/skills/dev2merge/SKILL.md
new file mode 100644
index 00000000..0a83a553
--- /dev/null
+++ b/patterns/dev2merge/skills/dev2merge/SKILL.md
@@ -0,0 +1,102 @@
+---
+name: dev2merge
+description: Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge
+allowed-tools: Bash, Read, Grep, Glob, Edit, Write
+triggers:
+ - "dev2merge"
+ - "/dev2merge"
+ - "dev-to-merge"
+ - "/dev-to-merge"
+ - "full dev cycle"
+ - "implement and merge"
+---
+
+# Dev2Merge: End-to-End Development Workflow
+
+## Role Detection (READ THIS FIRST -- MANDATORY)
+
+Role MUST be determined by explicit mode marker, not fragile natural-language substring matching.
+Treat the run as executor ONLY when initial prompt contains:
+`executor`.
+
+**If that marker is present, YOU ARE THE EXECUTOR.** Follow these rules:
+1. **SKIP the "Execution Protocol" section below** -- it is for the orchestrator, not you.
+2. **Read the pattern** at `patterns/dev2merge/PATTERN.md` and follow it step by step.
+3. **RECURSION GUARD**: Do NOT run `csa run --skill dev2merge` or `csa run --skill dev-to-merge` from inside this skill. Other `csa` commands required by the workflow (for example `csa run --skill mktd`, `csa review`, `csa debate`) are allowed.
+
+**Only if you are the main agent (Claude Code / human user)**:
+- You are the **orchestrator**. Follow the "Execution Protocol" steps below.
+
+---
+
+## Purpose
+
+Execute the complete development lifecycle on a feature branch: mandatory mktd planning (with internal debate), format, lint, test, stage, security scan, security audit, heterogeneous code review, commit with Conventional Commits, push, create PR, trigger cloud codex-bot review, handle false-positive arbitration via debate, fix-and-retrigger loops, and final squash-merge to main. This is the "everything in one command" workflow that composes `mktd`, `commit`, `security-audit`, `ai-reviewed-commit`, and `pr-codex-bot` into a single end-to-end pipeline.
+
+## Execution Protocol (ORCHESTRATOR ONLY)
+
+### Prerequisites
+
+- `csa` binary MUST be in PATH: `which csa`
+- `gh` CLI MUST be authenticated: `gh auth status`
+- `just` MUST be in PATH: `which just`
+- Must be on a feature branch (not `main` or `dev`)
+- Code changes must exist (staged or unstaged)
+
+### Quick Start
+
+```bash
+csa run --skill dev2merge "Implement, review, and merge <feature description>"
+```
+
+### Step-by-Step
+
+1. **Validate branch**: Verify on feature branch, not main/dev. Abort if protected.
+2. **Plan first (mktd)**: Run `csa run --skill mktd` and require a saved TODO for current branch (checkbox tasks + `DONE WHEN`). This guarantees mktd's built-in debate phase executed.
+3. **Quality gates**: Run `just fmt`, `just clippy`, `just test` sequentially.
+4. **Stage changes**: `git add -A`, then unstage incidental lockfiles unless scope indicates release/dependency updates.
+5. **Security scan**: Grep staged files for hardcoded secrets.
+6. **Security audit**: Run `security-audit` via bounded bash wrapper with timeout and required `SECURITY_AUDIT_VERDICT`.
+7. **Pre-commit review**: Run `csa review --diff` (heterogeneous reviewer). Fix issues up to 3 rounds.
+8. **Re-run quality gates**: `just pre-commit` after any fixes.
+9. **Generate commit message**: Run `scripts/gen_commit_msg.sh` to produce a deterministic Conventional Commits message.
+10. **Commit**: `git commit -m "${COMMIT_MSG}"`.
+11. **Version gate precheck**: auto-run `just check-version-bumped`; if needed, `just bump-patch` and create a dedicated release commit before pre-PR review/push.
+12. **Pre-PR cumulative review**: `csa review --range main...HEAD` (covers full branch, NOT just last commit). MUST pass before push.
+13. **Push**: `git push -u origin ${BRANCH}`.
+14. **Create PR**: `gh pr create --base main`.
+15. **Trigger codex bot**: post `@codex review` and capture trigger timestamp.
+16. **Poll and evaluate**: wait for bot comments/reviews newer than trigger timestamp.
+17. **Arbitrate false positives**: Use `csa debate` with independent model.
+18. **Fix real issues**: Commit fixes, push, re-trigger bot (max 10 iterations).
+19. **Merge**: `gh pr merge --squash --delete-branch`, update local main.
+
+## Example Usage
+
+| Command | Effect |
+|---------|--------|
+| `/dev2merge scope=executor` | Full cycle for executor module changes |
+| `/dev2merge` | Full cycle for all current changes |
+| `/dev-to-merge` | Backward-compatible alias to `dev2merge` |
+
+## Integration
+
+- **Composes**: `mktd`, `security-audit`, `ai-reviewed-commit` / `csa-review`, `commit`, `pr-codex-bot`
+- **Uses**: `mktd` (mandatory planning + debate evidence), `debate` (false-positive arbitration and self-authored review)
+- **Standalone**: Complete workflow -- does not need other skills to be invoked separately
+
+## Done Criteria
+
+1. Feature branch validated (not main/dev).
+2. mktd plan completed and a branch TODO was saved (`DONE WHEN` present).
+3. `just fmt`, `just clippy`, `just test` all exit 0.
+4. Security scan found no hardcoded secrets.
+5. Security audit returned PASS or PASS_DEFERRED.
+6. Pre-commit review completed with zero unresolved P0/P1 issues.
+7. Commit created with Conventional Commits format.
+8. PR created on GitHub targeting main.
+9. Cloud codex bot triggered and response handled.
+10. All bot comments classified and actioned (fixed, arbitrated, or acknowledged).
+11. PR merged via squash-merge.
+12. Local main updated: `git checkout main && git pull origin main`.
+13. Feature branch deleted (remote and local).
diff --git a/patterns/dev2merge/workflow.toml b/patterns/dev2merge/workflow.toml
new file mode 100644
index 00000000..d728e30b
--- /dev/null
+++ b/patterns/dev2merge/workflow.toml
@@ -0,0 +1,788 @@
+[workflow]
+name = "dev2merge"
+description = "Full development cycle from branch creation through mktd planning, commit, PR, codex-bot review, and merge"
+
+[[workflow.variables]]
+name = "AUDIT_OUTPUT"
+
+[[workflow.variables]]
+name = "AUDIT_PROMPT"
+
+[[workflow.variables]]
+name = "AUDIT_STATUS"
+
+[[workflow.variables]]
+name = "BOT_INLINE_COMMENTS"
+
+[[workflow.variables]]
+name = "BOT_PR_COMMENTS"
+
+[[workflow.variables]]
+name = "BOT_PR_FINDINGS"
+
+[[workflow.variables]]
+name = "BOT_REVIEWS"
+
+[[workflow.variables]]
+name = "BOT_REVIEW_FINDINGS"
+
+[[workflow.variables]]
+name = "BOT_TRIGGER_REACTIONS"
+
+[[workflow.variables]]
+name = "BRANCH"
+
+[[workflow.variables]]
+name = "COMMENTS_PAYLOAD"
+
+[[workflow.variables]]
+name = "COMMENT_URL"
+
+[[workflow.variables]]
+name = "COMMIT_MSG_LOCAL"
+
+[[workflow.variables]]
+name = "ELAPSED"
+
+[[workflow.variables]]
+name = "MATCHED_LOCKFILES"
+
+[[workflow.variables]]
+name = "ORIGIN_URL"
+
+[[workflow.variables]]
+name = "PRE_DIRTY_CARGO_LOCK"
+
+[[workflow.variables]]
+name = "PR_BODY_LOCAL"
+
+[[workflow.variables]]
+name = "PR_NUM"
+
+[[workflow.variables]]
+name = "PR_NUM_FROM_STEP"
+
+[[workflow.variables]]
+name = "PR_NUM_LOCAL"
+
+[[workflow.variables]]
+name = "REPO_LOCAL"
+
+[[workflow.variables]]
+name = "REVIEW_HAS_ISSUES"
+
+[[workflow.variables]]
+name = "SCOPE"
+
+[[workflow.variables]]
+name = "SELF_LOGIN"
+
+[[workflow.variables]]
+name = "STAGED_FILES"
+
+[[workflow.variables]]
+name = "STEP_19_OUTPUT"
+
+[[workflow.variables]]
+name = "STEP_25_OUTPUT"
+
+[[workflow.variables]]
+name = "TRIGGER_COMMENT_ID"
+
+[[workflow.variables]]
+name = "TRIGGER_TS"
+
+[[workflow.variables]]
+name = "VERDICT"
+
+[[workflow.variables]]
+name = "VERSION"
+
+[[workflow.variables]]
+name = "lockpath"
+
+[[workflow.steps]]
+id = 1
+title = "Validate Branch"
+tool = "bash"
+prompt = """
+Verify the current branch is a feature branch, not a protected branch.
+If on main or dev, abort immediately.
+
+```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch."
+ exit 1
+fi
+DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@')
+if [ -z "$DEFAULT_BRANCH" ]; then DEFAULT_BRANCH="main"; fi
+if [ "$BRANCH" = "$DEFAULT_BRANCH" ] || [ "$BRANCH" = "dev" ]; then
+ echo "ERROR: Cannot work directly on $BRANCH. Create a feature branch."
+ exit 1
+fi
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 2
+title = "Step 1.5: Plan with mktd (Debate Required)"
+tool = "bash"
+prompt = """
+Generate or refresh a branch TODO plan through `mktd` before development gates.
+This step MUST pass through mktd's built-in debate phase and save a TODO."""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 3
+title = "Include mktd"
+tool = "weave"
+prompt = "mktd"
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 4
+title = "Run Formatters"
+tool = "bash"
+prompt = """
+Run the project formatter to ensure consistent code style.
+
+```bash
+just fmt
+```"""
+
+[workflow.steps.on_fail]
+retry = 2
+
+[[workflow.steps]]
+id = 5
+title = "Run Linters"
+tool = "bash"
+prompt = """
+Run linters to catch static analysis issues.
+
+```bash
+just clippy
+```"""
+
+[workflow.steps.on_fail]
+retry = 2
+
+[[workflow.steps]]
+id = 6
+title = "Run Tests"
+tool = "bash"
+prompt = """
+Run the full test suite. All tests must pass before proceeding.
+
+```bash
+just test
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 7
+title = "Stage Changes"
+tool = "bash"
+prompt = '''
+Stage all modified and new files relevant to ${SCOPE}.
+Verify no untracked files remain.
+
+```bash
+git add -A
+if ! printf '%s' "${SCOPE:-}" | grep -Eqi 'release|version|lock|deps|dependency'; then
+ STAGED_FILES="$(git diff --cached --name-only)"
+ if printf '%s\n' "${STAGED_FILES}" | grep -Eq '(^|/)Cargo[.]toml$|(^|/)package[.]json$|(^|/)pnpm-workspace[.]yaml$|(^|/)go[.]mod$'; then
+ echo "INFO: Dependency manifest change detected; preserving staged lockfiles."
+ elif ! printf '%s\n' "${STAGED_FILES}" | grep -Ev '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' | grep -q .; then
+ echo "INFO: Lockfile-only staged change detected; preserving staged lockfiles."
+ else
+ MATCHED_LOCKFILES="$(printf '%s\n' "${STAGED_FILES}" | grep -E '(^|/)(Cargo[.]lock|package-lock[.]json|pnpm-lock[.]yaml|yarn[.]lock|go[.]sum)$' || true)"
+ if [ -n "${MATCHED_LOCKFILES}" ]; then
+ printf '%s\n' "${MATCHED_LOCKFILES}" | while read -r lockpath; do
+ echo "INFO: Unstaging incidental lockfile change: ${lockpath}"
+ git restore --staged -- "${lockpath}"
+ done
+ fi
+ fi
+fi
+if ! git diff --cached --name-only | grep -q .; then
+ echo "ERROR: No staged files remain after scope filtering."
+ exit 1
+fi
+if git ls-files --others --exclude-standard | grep -q .; then
+ echo "ERROR: Untracked files detected."
+ git ls-files --others --exclude-standard
+ exit 1
+fi
+```'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 8
+title = "Security Scan"
+tool = "bash"
+prompt = """
+Check for hardcoded secrets, debug statements, and commented-out code
+in staged files. Runs after staging so `git diff --cached` covers all changes.
+
+```bash
+git diff --cached --name-only | while read -r file; do
+ if grep -nE '(API_KEY|SECRET|PASSWORD|PRIVATE_KEY)=' "$file" 2>/dev/null; then
+ echo "FAIL: Potential secret in $file"
+ exit 1
+ fi
+done
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 9
+title = "Security Audit"
+tool = "bash"
+prompt = '''
+Run the security-audit skill: test completeness check, vulnerability scan,
+and code quality check. The audit MUST pass before commit.
+
+Phase 1: Can you propose a test case that does not exist? If yes, FAIL.
+Phase 2: Input validation, size limits, panic risks.
+Phase 3: No debug code, secrets, or commented-out code.
+
+```bash
+AUDIT_PROMPT="Use the security-audit skill.
+Run security-audit against staged changes.
+Output a concise report and end with EXACTLY one line:
+SECURITY_AUDIT_VERDICT: PASS|PASS_DEFERRED|FAIL"
+if command -v timeout >/dev/null 2>&1; then
+ AUDIT_OUTPUT="$(timeout 1200 csa run --skill security-audit "${AUDIT_PROMPT}" 2>&1)"
+ AUDIT_STATUS=$?
+else
+ AUDIT_OUTPUT="$(csa run --skill security-audit "${AUDIT_PROMPT}" 2>&1)"
+ AUDIT_STATUS=$?
+fi
+printf '%s\n' "${AUDIT_OUTPUT}"
+if [ "${AUDIT_STATUS}" -eq 124 ]; then
+ echo "ERROR: security-audit timed out after 1200s." >&2
+ exit 1
+fi
+if [ "${AUDIT_STATUS}" -ne 0 ]; then
+ echo "ERROR: security-audit command failed (exit=${AUDIT_STATUS})." >&2
+ exit 1
+fi
+VERDICT="$(printf '%s\n' "${AUDIT_OUTPUT}" | sed -nE 's/^SECURITY_AUDIT_VERDICT:[[:space:]]*(PASS_DEFERRED|PASS|FAIL)$/\1/p' | tail -n1)"
+if [ -z "${VERDICT}" ]; then
+ echo "ERROR: Missing SECURITY_AUDIT_VERDICT marker in audit output." >&2
+ exit 1
+fi
+if [ "${VERDICT}" = "FAIL" ]; then
+ echo "ERROR: security-audit verdict is FAIL." >&2
+ exit 1
+fi
+echo "SECURITY_AUDIT_VERDICT=${VERDICT}"
+```'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 10
+title = "Pre-Commit Review"
+tool = "csa"
+prompt = """
+Run heterogeneous code review on all uncommitted changes versus HEAD.
+The reviewer MUST be a different model family than the code author.
+
+```bash
+csa review --diff
+```
+
+Review output includes AGENTS.md compliance checklist."""
+tier = "tier-2-standard"
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 11
+title = "Fix Review Issues"
+tool = "csa"
+prompt = """
+Fix each issue identified by the pre-commit review.
+Preserve original code intent. Do NOT delete code to silence warnings."""
+tier = "tier-2-standard"
+condition = "${REVIEW_HAS_ISSUES}"
+
+[workflow.steps.on_fail]
+retry = 3
+
+[[workflow.steps]]
+id = 12
+title = "Re-run Quality Gates"
+tool = "bash"
+prompt = """
+Re-run formatters, linters, and tests after fixes.
+
+```bash
+just pre-commit
+```"""
+on_fail = "abort"
+condition = "${REVIEW_HAS_ISSUES}"
+
+[[workflow.steps]]
+id = 13
+title = "Re-review"
+tool = "csa"
+prompt = """
+Run `csa review --diff` again to verify all issues are resolved.
+Loop back to the fix-review-issues step if issues persist (max 3 rounds)."""
+tier = "tier-2-standard"
+on_fail = "abort"
+condition = "${REVIEW_HAS_ISSUES}"
+
+[[workflow.steps]]
+id = 14
+title = "Generate Commit Message"
+tool = "bash"
+prompt = """
+Generate a deterministic Conventional Commits message from staged files.
+
+```bash
+scripts/gen_commit_msg.sh "${SCOPE:-}"
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 15
+title = "Commit"
+tool = "bash"
+prompt = """
+Create the commit using the message produced by the commit-message generation step (exposed as STEP_12_OUTPUT).
+
+```bash
+COMMIT_MSG_LOCAL="${STEP_12_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+ echo "ERROR: Commit message is empty. Step 12 must output a commit message." >&2
+ exit 1
+fi
+git commit -m "${COMMIT_MSG_LOCAL}"
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 16
+title = "Ensure Version Bumped"
+tool = "bash"
+prompt = """
+Ensure workspace version differs from main before push gate.
+If not bumped yet, auto-bump patch and create a dedicated release commit.
+
+```bash
+set -euo pipefail
+if just check-version-bumped; then
+ echo "Version bump check passed."
+ exit 0
+fi
+PRE_DIRTY_CARGO_LOCK=0
+if git diff --name-only -- Cargo.lock | grep -q .; then
+ PRE_DIRTY_CARGO_LOCK=1
+fi
+just bump-patch
+# Use workspace weave binary to avoid stale globally-installed version drift.
+cargo run -p weave -- lock
+git add Cargo.toml weave.lock
+if [ "${PRE_DIRTY_CARGO_LOCK}" -eq 0 ] && [ -f Cargo.lock ]; then
+ git add Cargo.lock
+else
+ echo "INFO: Skipping Cargo.lock in release commit (pre-existing local edits)."
+fi
+if git diff --cached --quiet; then
+ echo "ERROR: Version bump expected changes but none were staged." >&2
+ exit 1
+fi
+VERSION="$(cargo metadata --no-deps --format-version 1 | jq -r '.packages[] | select(.name == "cli-sub-agent") | .version')"
+git commit -m "chore(release): bump workspace version to ${VERSION}"
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 17
+title = "Pre-PR Cumulative Review"
+tool = "csa"
+prompt = """
+Run a cumulative review covering ALL commits on the feature branch since main.
+This is distinct from Step 8's per-commit review (`csa review --diff`):
+- Step 8 reviews uncommitted changes (staged diff) — single-commit granularity.
+- This step reviews the full feature branch — catches cross-commit issues.
+
+MANDATORY: This review MUST pass before pushing to origin.
+
+```bash
+csa review --range main...HEAD
+CUMULATIVE_REVIEW_COMPLETED=true
+```"""
+tier = "tier-2-standard"
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 18
+title = "Push to Origin"
+tool = "bash"
+prompt = """
+Push the feature branch to the remote origin.
+
+```bash
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
+git push -u origin "${BRANCH}"
+```"""
+
+[workflow.steps.on_fail]
+retry = 2
+
+[[workflow.steps]]
+id = 19
+title = "Create Pull Request"
+tool = "bash"
+prompt = '''
+Create a PR targeting main via GitHub CLI. The PR body includes a summary
+of changes for ${SCOPE} and a test plan checklist covering tests, linting,
+security audit, and codex review.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+  ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+  REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+  REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+  echo "ERROR: Cannot resolve repository owner/name." >&2
+  exit 1
+fi
+# Step outputs follow STEP_<id-3> (cf. STEP_18 <- step 21's PR_NUM), so the
+# Step 12 commit-message generator (id 14) is exposed as STEP_11_OUTPUT.
+COMMIT_MSG_LOCAL="${STEP_11_OUTPUT:-${COMMIT_MSG:-}}"
+if [ -z "${COMMIT_MSG_LOCAL}" ]; then
+  echo "ERROR: PR title is empty. Step 12 output is required." >&2
+  exit 1
+fi
+PR_BODY_LOCAL="${PR_BODY:-## Summary
+- Scope: ${SCOPE:-unspecified}
+
+## Test Plan
+- just fmt
+- just clippy
+- just test
+- csa review --range main...HEAD
+}"
+gh pr create --base main --repo "${REPO_LOCAL}" --title "${COMMIT_MSG_LOCAL}" --body "${PR_BODY_LOCAL}"
+```'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 20
+title = "Validation"
+tool = "bash"
+prompt = """
+Re-run local validation before triggering the cloud bot review.
+
+```bash
+just fmt
+just clippy
+just test
+csa review --range main...HEAD
+```"""
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 21
+title = "Trigger Codex Bot Review"
+tool = "bash"
+prompt = '''
+Trigger the cloud codex review bot on the newly created PR.
+Capture the PR number for polling.
+
+```bash
+set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM=$(gh pr view --json number -q '.number')
+COMMENT_URL="$(gh pr comment "${PR_NUM}" --repo "${REPO_LOCAL}" --body "@codex review")"
+SELF_LOGIN=$(gh api user -q '.login')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
+if [ -z "${TRIGGER_TS}" ]; then
+ TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+fi
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 22
+title = "Poll for Bot Response"
+tool = "bash"
+prompt = '''
+Poll for bot review response with a bounded timeout (max 20 minutes).
+Output `1` when bot findings are present; output empty string otherwise.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then PR_NUM_FROM_STEP="$(gh pr view --json number -q '.number')"; fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for bot response." >&2
+exit 1
+```'''
+on_fail = "abort"
+
+[[workflow.steps]]
+id = 23
+title = "Evaluate Bot Comments"
+tool = "csa"
+prompt = """
+Evaluate all inline bot findings on the PR and produce a consolidated action plan.
+List suspected false positives and real defects separately."""
+tier = "tier-2-standard"
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 24
+title = "Arbitrate Disputed Findings"
+tool = "csa"
+prompt = """
+For disputed findings, run independent arbitration using `csa debate` and
+produce a verdict for each disputed item."""
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 25
+title = "Fix Confirmed Issues"
+tool = "csa"
+prompt = """
+Implement fixes for confirmed bot findings and create commit(s) with clear
+messages. Do not modify unrelated files."""
+tier = "tier-2-standard"
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 26
+title = "Re-run Local Review After Fixes"
+tool = "csa"
+prompt = "Run `csa review --diff` to validate fixes before re-triggering cloud review."
+tier = "tier-2-standard"
+condition = "${STEP_19_OUTPUT}"
+
+[workflow.steps.on_fail]
+retry = 2
+
+[[workflow.steps]]
+id = 27
+title = "Push Fixes and Re-trigger Review"
+tool = "bash"
+prompt = '''
+Push all fix commits and trigger a new round of codex review.
+
+```bash
+set -euo pipefail
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+BRANCH="$(git branch --show-current)"
+if [ -z "${BRANCH}" ] || [ "${BRANCH}" = "HEAD" ]; then
+ echo "ERROR: Cannot determine current branch for push."
+ exit 1
+fi
+git push origin "${BRANCH}"
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+COMMENT_URL="$(gh pr comment "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --body "@codex review")"
+SELF_LOGIN=$(gh api user -q '.login')
+COMMENTS_PAYLOAD=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_LOCAL}/comments?per_page=100")
+TRIGGER_TS=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .created_at // empty')
+TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENT_URL}" | sed -nE 's#.*issuecomment-([0-9]+).*#\1#p')
+if [ -z "${TRIGGER_COMMENT_ID}" ]; then
+ TRIGGER_COMMENT_ID=$(printf '%s' "${COMMENTS_PAYLOAD}" | jq -r --arg me "${SELF_LOGIN}" '[.[]? | select((.user.login // "") == $me and (.body // "") == "@codex review")] | sort_by(.created_at) | last | .id // empty')
+fi
+if [ -z "${TRIGGER_TS}" ]; then
+ TRIGGER_TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+fi
+printf 'PR_NUM=%s\nTRIGGER_TS=%s\nTRIGGER_COMMENT_ID=%s\n' "${PR_NUM_LOCAL}" "${TRIGGER_TS}" "${TRIGGER_COMMENT_ID}"
+```'''
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 28
+title = "Poll Re-triggered Bot Response"
+tool = "bash"
+prompt = '''
+After posting the second `@codex review`, poll again with bounded timeout.
+Output `1` when findings remain; output empty string when clean.
+
+```bash
+TIMEOUT=1200; INTERVAL=30; ELAPSED=0
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+TRIGGER_TS="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_TS=//p' | tail -n1)"
+TRIGGER_COMMENT_ID="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^TRIGGER_COMMENT_ID=//p' | tail -n1)"
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_FROM_STEP}" ]; then
+ PR_NUM_FROM_STEP="$(gh pr view --json number -q '.number')"
+fi
+if [ -z "${TRIGGER_TS}" ]; then TRIGGER_TS="1970-01-01T00:00:00Z"; fi
+while [ "$ELAPSED" -lt "$TIMEOUT" ]; do
+ BOT_INLINE_COMMENTS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select(.created_at >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_PR_COMMENTS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not))] | length')
+ BOT_PR_FINDINGS=$(gh api "repos/${REPO_LOCAL}/issues/${PR_NUM_FROM_STEP}/comments?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.created_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and (((.body // "") | ascii_downcase | contains("@codex review")) | not) and ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical")))] | length')
+ BOT_REVIEWS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ BOT_REVIEW_FINDINGS=$(gh api "repos/${REPO_LOCAL}/pulls/${PR_NUM_FROM_STEP}/reviews?per_page=100" | jq -r --arg ts "${TRIGGER_TS}" '[.[]? | select((.submitted_at // "") >= $ts and (.user.login | ascii_downcase | test("codex|bot|connector")) and ((((.state // "") | ascii_downcase) == "changes_requested") or ((.body // "") | ascii_downcase | test("(^|[^a-z0-9])p[0-3]([^a-z0-9]|$)|changes requested|must fix|blocking|severity|critical"))))] | length')
+ BOT_TRIGGER_REACTIONS=0
+ if [ -n "${TRIGGER_COMMENT_ID}" ]; then
+ BOT_TRIGGER_REACTIONS=$(gh api "repos/${REPO_LOCAL}/issues/comments/${TRIGGER_COMMENT_ID}/reactions?per_page=100" -H "Accept: application/vnd.github+json" | jq -r '[.[]? | select((.user.login | ascii_downcase | test("codex|bot|connector")))] | length')
+ fi
+ echo "heartbeat elapsed=${ELAPSED}s inline=${BOT_INLINE_COMMENTS} pr_comments=${BOT_PR_COMMENTS} pr_findings=${BOT_PR_FINDINGS} reviews=${BOT_REVIEWS} review_findings=${BOT_REVIEW_FINDINGS} reactions=${BOT_TRIGGER_REACTIONS}"
+ if [ "${BOT_INLINE_COMMENTS}" -gt 0 ] || [ "${BOT_PR_FINDINGS}" -gt 0 ] || [ "${BOT_REVIEW_FINDINGS}" -gt 0 ]; then
+ echo "1"
+ exit 0
+ fi
+ if [ "${BOT_PR_COMMENTS}" -gt 0 ] || [ "${BOT_REVIEWS}" -gt 0 ] || [ "${BOT_TRIGGER_REACTIONS}" -gt 0 ]; then
+ echo ""
+ exit 0
+ fi
+ sleep "$INTERVAL"
+ ELAPSED=$((ELAPSED + INTERVAL))
+done
+echo "ERROR: Timed out waiting for re-triggered bot response." >&2
+exit 1
+```'''
+on_fail = "abort"
+condition = "${STEP_19_OUTPUT}"
+
+[[workflow.steps]]
+id = 29
+title = "Stop on Remaining Bot Findings"
+tool = "bash"
+prompt = """
+Abort merge when re-triggered bot review still reports findings.
+
+```bash
+echo "ERROR: Bot review still has findings after re-trigger. Do not merge." >&2
+exit 1
+```"""
+on_fail = "abort"
+condition = "(${STEP_19_OUTPUT}) && (${STEP_25_OUTPUT})"
+
+[[workflow.steps]]
+id = 30
+title = "Merge PR After Re-review Clean"
+tool = "bash"
+prompt = '''
+Squash-merge the PR after the second bot review returns clean, then update local main.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_24_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+fi
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```'''
+on_fail = "abort"
+condition = "(${STEP_19_OUTPUT}) && (!(${STEP_25_OUTPUT}))"
+
+[[workflow.steps]]
+id = 31
+title = "Merge PR (Initial Review Clean)"
+tool = "bash"
+prompt = '''
+No issues were found in the initial bot review. Merge using the PR number from step output.
+
+```bash
+REPO_LOCAL="$(gh repo view --json nameWithOwner -q '.nameWithOwner' 2>/dev/null || true)"
+if [ -z "${REPO_LOCAL}" ]; then
+ ORIGIN_URL="$(git remote get-url origin 2>/dev/null || true)"
+ REPO_LOCAL="$(printf '%s' "${ORIGIN_URL}" | sed -nE 's#(git@github\.com:|https://github\.com/)([^/]+/[^/]+)(\.git)?$#\2#p')"
+ REPO_LOCAL="${REPO_LOCAL%.git}"
+fi
+if [ -z "${REPO_LOCAL}" ]; then
+ echo "ERROR: Cannot resolve repository owner/name." >&2
+ exit 1
+fi
+PR_NUM_LOCAL="$(printf '%s\n' "${STEP_18_OUTPUT:-}" | sed -n 's/^PR_NUM=//p' | tail -n1)"
+if [ -z "${PR_NUM_LOCAL}" ]; then
+ PR_NUM_LOCAL="$(gh pr view --json number -q '.number')"
+fi
+gh pr merge "${PR_NUM_LOCAL}" --repo "${REPO_LOCAL}" --squash --delete-branch
+git checkout main && git pull origin main
+```'''
+on_fail = "abort"
+condition = "!(${STEP_19_OUTPUT})"
diff --git a/patterns/pr-codex-bot/skills/pr-codex-bot/SKILL.md b/patterns/pr-codex-bot/skills/pr-codex-bot/SKILL.md
index 51b953e4..9032dd92 100644
--- a/patterns/pr-codex-bot/skills/pr-codex-bot/SKILL.md
+++ b/patterns/pr-codex-bot/skills/pr-codex-bot/SKILL.md
@@ -169,7 +169,7 @@ csa run --skill pr-codex-bot "Review and merge the current PR"
## Integration
- **Depends on**: `csa-review` (Step 2 local review), `debate` (Step 6 false-positive arbitration)
-- **Used by**: `commit` (Step 13 auto PR), `dev-to-merge` (Steps 16-24)
+- **Used by**: `commit` (Step 13 auto PR), `dev2merge` (Steps 17-25), `dev-to-merge` (legacy alias)
- **ATOMIC with**: PR creation -- Steps 1-9 are an atomic unit; NEVER stop after PR creation
## Done Criteria
diff --git a/patterns/security-audit/skills/security-audit/SKILL.md b/patterns/security-audit/skills/security-audit/SKILL.md
index 30bca7a6..6d974281 100644
--- a/patterns/security-audit/skills/security-audit/SKILL.md
+++ b/patterns/security-audit/skills/security-audit/SKILL.md
@@ -61,7 +61,7 @@ csa run --skill security-audit "Audit the staged changes for security issues"
## Integration
-- **Used by**: `commit` (Step 7), `dev-to-merge` (Step 7)
+- **Used by**: `commit` (Step 7), `dev2merge` (Step 8), `dev-to-merge` (legacy alias)
- **May trigger**: Task creation for deferred issues (PASS_DEFERRED verdict)
## Done Criteria
diff --git a/skill.md b/skill.md
index 6a390d71..5b4699bf 100644
--- a/skill.md
+++ b/skill.md
@@ -340,14 +340,15 @@ done
| Pattern | What it does |
|---------|--------------|
| `sa` | Three-layer recursive sub-agent orchestration |
-| `dev-to-merge` | Branch-to-merge: implement, validate, PR, review, merge |
+| `dev2merge` | Branch-to-merge: plan (mktd+debate), implement, validate, PR, review, merge |
+| `dev-to-merge` | Backward-compatible alias of `dev2merge` |
| `csa-issue-reporter` | Structured GitHub issue filing for CSA errors |
**Install**:
```bash
mkdir -p .csa/plans
-for pattern in sa dev-to-merge csa-issue-reporter; do
+for pattern in sa dev2merge dev-to-merge csa-issue-reporter; do
weave compile .weave/deps/cli-sub-agent/patterns/$pattern/PATTERN.md \
--output .csa/plans/$pattern.toml
done
diff --git a/skills/AGENTS.md b/skills/AGENTS.md
index 80af3f58..f401192e 100644
--- a/skills/AGENTS.md
+++ b/skills/AGENTS.md
@@ -24,7 +24,7 @@ installs the workflow entrypoints required by `csa review` and `csa debate`.
| `csa-review` | Compatibility shim for review command scaffolding; delegates behavior to workflow protocol. |
| `debate` | Compatibility shim for debate command scaffolding and continuation protocol. |
-## Compiled Patterns (13)
+## Compiled Patterns (14)
| Pattern | Description |
|---|---|
@@ -34,6 +34,7 @@ installs the workflow entrypoints required by `csa review` and `csa debate`.
| `csa-issue-reporter` | Structured GitHub issue filing workflow for CSA runtime/tool errors. |
| `csa-review` | Independent CSA-driven code review with session isolation and structured output. |
| `debate` | Adversarial multi-tool strategy debate with escalation and convergence checks. |
+| `dev2merge` | End-to-end branch-to-merge workflow with mandatory mktd planning/debate gate. |
| `dev-to-merge` | End-to-end branch-to-merge workflow: implement, validate, PR, bot review, merge. |
| `file-audit` | Per-file AGENTS.md compliance audit with report generation workflow. |
| `mktd` | Make TODO workflow: reconnaissance, drafting, debate, and approval. |
diff --git a/weave.lock b/weave.lock
index 79866b5b..b3c71f46 100644
--- a/weave.lock
+++ b/weave.lock
@@ -1,8 +1,8 @@
package = []
[versions]
-csa = "0.1.57"
-weave = "0.1.57"
+csa = "0.1.58"
+weave = "0.1.58"
[migrations]
applied = []