diff --git a/CLAUDE.md b/CLAUDE.md
index 3e0a652..b24ef75 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -50,7 +50,7 @@ python -m pytest tests/
**Key CLI flags for the simulation:** `--scenario` (no_notice|alert_guided|advice_guided), `--messaging` (on|off), `--events` (on|off), `--web-dashboard` (on|off), `--metrics` (on|off), `--overlays` (on|off).
-**Key environment variables:** `OPENAI_MODEL` (default: `gpt-4o-mini`), `DECISION_PERIOD_S` (default: `5.0`), `NET_FILE` (default: `sumo/Repaired.rou.xml`), `SUMO_CFG` (default: `sumo/Repaired.sumocfg`), `RUN_MODE`, `REPLAY_LOG_PATH`, `EVENTS_LOG_PATH`, `METRICS_LOG_PATH`.
+**Key environment variables:** `OPENAI_MODEL` (default: `gpt-4o-mini`), `DECISION_PERIOD_S` (default: `5.0`), `NET_FILE` (default: `sumo/Repaired.net.xml`), `SUMO_CFG` (default: `sumo/Repaired.sumocfg`), `RUN_MODE`, `REPLAY_LOG_PATH`, `EVENTS_LOG_PATH`, `METRICS_LOG_PATH`.
## Architecture
@@ -80,7 +80,7 @@ python -m pytest tests/
At the top of the file (labeled `USER CONFIG`):
- `CONTROL_MODE` — `"destination"` (default) or `"route"`
-- `NET_FILE` — path to SUMO route/network file (overridable via `NET_FILE` env var; default: `sumo/Repaired.rou.xml`)
+- `NET_FILE` — path to the SUMO network file (overridable via `NET_FILE` env var; default: `sumo/Repaired.net.xml`)
- `DESTINATION_LIBRARY` / `ROUTE_LIBRARY` — hardcoded choice menus for agents
- `OPENAI_MODEL` / `DECISION_PERIOD_S` — overridable via env vars
diff --git a/agentevac/agents/agent_state.py b/agentevac/agents/agent_state.py
index 0688090..ff7d92b 100644
--- a/agentevac/agents/agent_state.py
+++ b/agentevac/agents/agent_state.py
@@ -30,7 +30,7 @@
import math
import random
from dataclasses import dataclass, field
-from typing import Any, Dict, List, Tuple
+from typing import Any, Dict, List, Optional, Tuple
@dataclass
@@ -66,6 +66,10 @@ class AgentRuntimeState:
decision_history: List[Dict[str, Any]] = field(default_factory=list)
observation_history: List[Dict[str, Any]] = field(default_factory=list)
has_departed: bool = True
+ last_input_hash: Optional[int] = None
+ last_llm_choice_idx: Optional[int] = None
+ last_llm_reason: Optional[str] = None
+ last_llm_action: Optional[str] = None
# Global registry of all agent states, keyed by vehicle ID.
diff --git a/agentevac/agents/routing_utility.py b/agentevac/agents/routing_utility.py
index dc8ceb3..1f581f9 100644
--- a/agentevac/agents/routing_utility.py
+++ b/agentevac/agents/routing_utility.py
@@ -12,7 +12,7 @@
- ``E(option)`` : expected exposure score (``_expected_exposure``).
- ``C(option)`` : travel cost in equivalent minutes (``_travel_cost``).
-Expected exposure combines four components:
+Expected exposure combines four components (in ``alert_guided`` and ``advice_guided``):
1. **risk_sum** : Sum of edge-level fire risk scores along the route (or fastest path).
Scaled by a severity multiplier that increases with ``p_risky`` and ``p_danger``.
2. **blocked_edges** : Number of edges along the route that are currently inside a
@@ -23,8 +23,14 @@
4. **uncertainty_penalty** : A penalty proportional to ``(1 - confidence)`` that
discourages fragile choices when the agent is unsure of the hazard.
-Annotated menus (``annotate_menu_with_expected_utility``) are used in the *advice_guided*
-scenario so the LLM receives pre-computed utility context alongside each option.
+In ``no_notice`` mode, agents lack route-specific fire data. Exposure is instead
+estimated from the agent's general belief state scaled by route length — longer routes
+mean more time exposed to whatever danger the agent perceives.
+
+Annotated menus (``annotate_menu_with_expected_utility``) are computed for all three
+scenarios so the LLM always receives a utility score. The *precision* of the exposure
+estimate varies by information regime: belief-only (no_notice), current fire state
+(alert_guided), or current fire state with full route-head data (advice_guided).
"""
from typing import Any, Dict, List
@@ -107,6 +113,51 @@ def _travel_cost(menu_item: Dict[str, Any]) -> float:
return _num(edge_count, 0.0) * 0.25
+def _observation_based_exposure(
+ menu_item: Dict[str, Any],
+ belief: Dict[str, Any],
+ psychology: Dict[str, Any],
+) -> float:
+ """Estimate hazard exposure when route-specific fire data is unavailable.
+
+ Used in the ``no_notice`` scenario where agents have only their own noisy
+ observation of the current edge. Without per-route fire metrics (risk_sum,
+ blocked_edges, min_margin_m), exposure is derived from the agent's general
+ belief state scaled by route length:
+
+ hazard_level = 0.3 * p_risky + 0.7 * p_danger + 0.4 * perceived_risk
+ length_factor = len_edges * 0.15
+ exposure = hazard_level * length_factor + uncertainty_penalty
+
+ Longer routes are penalised more because a longer route means more time
+ spent driving through a potentially hazardous environment. The coefficients
+ prioritise ``p_danger`` (0.7) over ``p_risky`` (0.3) to maintain consistency
+ with the severity weighting in ``_expected_exposure``.
+
+ Args:
+ menu_item: A destination or route dict.
+ belief: The agent's current Bayesian belief dict.
+ psychology: The agent's current psychology dict (perceived_risk, confidence).
+
+ Returns:
+ Expected exposure score (>= 0; higher = more hazardous).
+ """
+ p_risky = _num(belief.get("p_risky"), 1.0 / 3.0)
+ p_danger = _num(belief.get("p_danger"), 1.0 / 3.0)
+ perceived_risk = _num(psychology.get("perceived_risk"), p_danger)
+ confidence = _num(psychology.get("confidence"), 0.0)
+
+ hazard_level = 0.3 * p_risky + 0.7 * p_danger + 0.4 * perceived_risk
+ len_edges = _num(
+ menu_item.get("len_edges", menu_item.get("len_edges_fastest_path")),
+ 1.0,
+ )
+ length_factor = len_edges * 0.15
+ uncertainty_penalty = max(0.0, 1.0 - confidence) * 0.75
+
+ return hazard_level * length_factor + uncertainty_penalty
+
+
def _expected_exposure(
menu_item: Dict[str, Any],
belief: Dict[str, Any],
@@ -227,14 +278,19 @@ def annotate_menu_with_expected_utility(
belief: Dict[str, Any],
psychology: Dict[str, Any],
profile: Dict[str, Any],
+ scenario: str = "advice_guided",
) -> List[Dict[str, Any]]:
"""Annotate each menu option in-place with its expected utility and component breakdown.
For *destination* mode, unreachable options (``reachable=False``) receive
``expected_utility=None`` and a minimal component dict to signal their exclusion.
- The annotated menu is later filtered by ``scenarios.filter_menu_for_scenario`` so
- that utility scores are only visible to agents in the *advice_guided* regime.
+ The ``scenario`` parameter controls which exposure function is used:
+
+ - ``"no_notice"``: ``_observation_based_exposure`` — uses only the agent's belief
+ state and route length (no route-specific fire data).
+ - ``"alert_guided"`` / ``"advice_guided"``: ``_expected_exposure`` — uses route-
+ specific fire metrics (risk_sum, blocked_edges, min_margin_m).
Args:
menu: List of destination or route dicts (mutated in-place).
@@ -242,6 +298,8 @@ def annotate_menu_with_expected_utility(
belief: The agent's current Bayesian belief dict.
psychology: The agent's current psychology dict.
profile: The agent's profile dict (supplies ``lambda_e``, ``lambda_t``).
+ scenario: Active information regime (``"no_notice"``, ``"alert_guided"``,
+ or ``"advice_guided"``). Controls which exposure function is used.
Returns:
The same ``menu`` list, with each item updated to include:
@@ -249,6 +307,9 @@ def annotate_menu_with_expected_utility(
- ``utility_components``: Dict with lambda_e, lambda_t, expected_exposure,
travel_cost (and ``reachable=False`` if unreachable).
"""
+ use_observation_exposure = str(scenario).strip().lower() == "no_notice"
+ exposure_fn = _observation_based_exposure if use_observation_exposure else _expected_exposure
+
for item in menu:
if mode == "destination":
if not item.get("reachable", False):
@@ -260,18 +321,17 @@ def annotate_menu_with_expected_utility(
"reachable": False,
}
continue
- expected_exposure = _expected_exposure(item, belief, psychology)
- travel_cost = _travel_cost(item)
- utility = score_destination_utility(item, belief, psychology, profile)
- else:
- expected_exposure = _expected_exposure(item, belief, psychology)
- travel_cost = _travel_cost(item)
- utility = score_route_utility(item, belief, psychology, profile)
+
+ lambda_e = max(0.0, _num(profile.get("lambda_e"), 1.0))
+ lambda_t = max(0.0, _num(profile.get("lambda_t"), 0.1))
+ expected_exposure = exposure_fn(item, belief, psychology)
+ travel_cost = _travel_cost(item)
+ utility = -((lambda_e * expected_exposure) + (lambda_t * travel_cost))
item["expected_utility"] = round(utility, 4)
item["utility_components"] = {
- "lambda_e": round(max(0.0, _num(profile.get("lambda_e"), 1.0)), 4),
- "lambda_t": round(max(0.0, _num(profile.get("lambda_t"), 0.1)), 4),
+ "lambda_e": round(lambda_e, 4),
+ "lambda_t": round(lambda_t, 4),
"expected_exposure": round(expected_exposure, 4),
"travel_cost": round(travel_cost, 4),
}
diff --git a/agentevac/agents/scenarios.py b/agentevac/agents/scenarios.py
index 735c067..a92cc62 100644
--- a/agentevac/agents/scenarios.py
+++ b/agentevac/agents/scenarios.py
@@ -5,8 +5,9 @@
**no_notice** — No official warning exists yet.
Agents rely solely on their own noisy margin observations and natural-language
- messages from neighbours. Menu items contain only minimal fields (name,
- reachability). This represents the typical onset of a rapidly spreading wildfire
+ messages from neighbours. Menu items include route identity, travel time, and
+ an observation-based utility score (local road knowledge), but no fire-specific
+ risk metrics. This represents the typical onset of a rapidly spreading wildfire
before emergency services have issued formal guidance.
**alert_guided** — Official alerts broadcast general hazard information.
@@ -68,7 +69,7 @@ def load_scenario_config(mode: str) -> Dict[str, Any]:
"forecast_visible": False,
"route_head_forecast_visible": False,
"official_route_guidance_visible": False,
- "expected_utility_visible": False,
+ "expected_utility_visible": True,
"neighborhood_observation_visible": True,
}
if name == "alert_guided":
@@ -81,7 +82,7 @@ def load_scenario_config(mode: str) -> Dict[str, Any]:
"forecast_visible": True,
"route_head_forecast_visible": False,
"official_route_guidance_visible": False,
- "expected_utility_visible": False,
+ "expected_utility_visible": True,
"neighborhood_observation_visible": True,
}
return {
@@ -194,22 +195,27 @@ def filter_menu_for_scenario(
for item in menu:
out = dict(item)
if not cfg["official_route_guidance_visible"]:
- # Remove advisory labels produced by the operator briefing logic.
+ # Remove advisory labels and authority source produced by the operator briefing logic.
out.pop("advisory", None)
out.pop("briefing", None)
out.pop("reasons", None)
-
- if not cfg["expected_utility_visible"]:
- # Remove pre-computed utility scores so agents cannot use them as a shortcut.
- out.pop("expected_utility", None)
- out.pop("utility_components", None)
+ out.pop("guidance_source", None)
if cfg["mode"] == "no_notice":
- # Reduce to the bare minimum an agent could reasonably know without warnings.
+ # Keep fields an agent could plausibly know from local familiarity:
+ # route identity, reachability, travel time/length (local knowledge),
+ # and observation-based utility scores.
if control_mode == "destination":
- keep_keys = {"idx", "name", "dest_edge", "reachable", "note"}
+ keep_keys = {
+ "idx", "name", "dest_edge", "reachable", "note",
+ "travel_time_s_fastest_path", "len_edges_fastest_path",
+ "expected_utility", "utility_components",
+ }
else:
- keep_keys = {"idx", "name", "len_edges"}
+ keep_keys = {
+ "idx", "name", "len_edges",
+ "expected_utility", "utility_components",
+ }
out = {k: v for k, v in out.items() if k in keep_keys}
prompt_menu.append(out)
@@ -240,17 +246,14 @@ def scenario_prompt_suffix(mode: str) -> str:
)
if cfg["mode"] == "alert_guided":
return (
- "This is an alert-guided scenario: official alerts describe the fire, but they do not prescribe a route. "
- # "Use forecast and hazard cues, but make your own navigation choice."
- "but do not prescribe a specific route. Do NOT invent route guidance. Use the provided official alert content, "
- "hazard and forecast cues (if provided), and local road conditions to choose when, where and how to evacuate."
-
+ "This is an alert-guided scenario: official alerts describe the fire but do not prescribe a specific route. "
+ "Do NOT invent route guidance. Use the provided official alert content, "
+ "hazard and forecast cues, and local road conditions to decide when, where, and how to evacuate."
)
return (
- "This is an advice-guided scenario: official alerts include route-oriented guidance. "
- "You may use advisories, briefings, and expected utility as formal support. "
- # "ADVICE-GUIDED scenario: officials issue an evacuation *order* (leave immediately) and include route-oriented guidance (may be high-level and may change)."
- "Default to following designated routes/instructions unless they are blocked, unsafe "
- "or extremely congested; if deviating, state why and pick the safest feasible alternative. Stay responsive to updates."
-
+ "This is an advice-guided evacuation: the Emergency Operations Center has issued official route guidance for your area. "
+ "Follow routes marked advisory='Recommended' unless they are physically blocked or impassable. "
+ "If you must deviate from official guidance, state why and choose the safest feasible alternative. "
+ "Delayed departure or ignoring recommended routes increases your exposure to dangerous fire conditions. "
+ "Stay responsive to updated guidance as conditions change."
)
diff --git a/agentevac/analysis/metrics.py b/agentevac/analysis/metrics.py
index 91f7678..f29545a 100644
--- a/agentevac/analysis/metrics.py
+++ b/agentevac/analysis/metrics.py
@@ -137,6 +137,10 @@ def record_arrival(self, agent_id: str, sim_t_s: float) -> None:
self._arrival_times[agent_id] = float(sim_t_s)
self._last_seen_time[agent_id] = float(sim_t_s)
+ def arrived_count(self) -> int:
+ """Return the number of agents that have arrived at their destination."""
+ return len(self._arrival_times)
+
def observe_active_vehicles(self, active_vehicle_ids: List[str], sim_t_s: float) -> None:
"""Update the active-vehicle set for live bookkeeping only.
diff --git a/agentevac/simulation/main.py b/agentevac/simulation/main.py
index e0204d9..016ec40 100644
--- a/agentevac/simulation/main.py
+++ b/agentevac/simulation/main.py
@@ -22,6 +22,7 @@
**Key environment variables (override defaults without CLI):**
OPENAI_MODEL : LLM model ID (default: gpt-4o-mini).
DECISION_PERIOD_S : Seconds between LLM decision rounds (default: 5.0).
+ SIM_END_TIME_S : Max simulation duration in seconds (default: 1200).
RUN_MODE : record | replay.
REPLAY_LOG_PATH : Path to the JSONL replay log.
EVENTS_LOG_PATH : Base path for the event stream JSONL.
@@ -97,6 +98,7 @@
from agentevac.utils.replay import RouteReplay
# ---- OpenAI (LLM control) ----
+from concurrent.futures import ThreadPoolExecutor
from openai import OpenAI
from pydantic import BaseModel, Field, conint, create_model
@@ -123,7 +125,7 @@
CONTROL_MODE = "destination"
# Your SUMO net file used by the .sumocfg (needed for edge geometry)
-NET_FILE = os.getenv("NET_FILE", "sumo/Repaired.rou.xml") # override via NET_FILE env var
+NET_FILE = os.getenv("NET_FILE", "sumo/Repaired.net.xml") # override via NET_FILE env var
# OpenAI model + decision cadence
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
@@ -271,6 +273,7 @@ def _parse_cli_args() -> argparse.Namespace:
parser.add_argument("--delay-heavy-ratio", type=float, help="Max delay ratio for 'heavy delay'.")
parser.add_argument("--recommended-min-margin-m", type=float, help="Min margin for advisory='Recommended'.")
parser.add_argument("--caution-min-margin-m", type=float, help="Min margin for advisory='Use with caution'.")
+ parser.add_argument("--sim-end-time", type=float, help="Simulation end time in seconds (default: 1200).")
return parser.parse_args()
@@ -434,6 +437,9 @@ def _agent_profile(agent_id: str) -> Dict[str, float]:
CAUTION_MIN_MARGIN_M = _float_from_env_or_cli(
CLI_ARGS.caution_min_margin_m, "CAUTION_MIN_MARGIN_M", 100.0
)
+SIM_END_TIME_S = _float_from_env_or_cli(
+ CLI_ARGS.sim_end_time, "SIM_END_TIME_S", 1200.0
+)
if not (0.0 <= MARGIN_VERY_CLOSE_M <= MARGIN_NEAR_M <= MARGIN_BUFFERED_M):
sys.exit(
@@ -521,16 +527,19 @@ def _agent_profile(agent_id: str) -> Dict[str, float]:
FIRE_SOURCES = [
# {"id": "F0", "t0": 0.0, "x": 9000.0, "y": 9000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
# {"id": "F0_1", "t0": 0.0, "x": 9000.0, "y": 27000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
-{"id": "F0", "t0": 0.0, "x": 22000.0, "y": 9000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
- {"id": "F0_1", "t0": 0.0, "x": 24000.0, "y": 6000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
+{"id": "F0", "t0": 0.0, "x": 22000.0, "y": 9000.0, "r0": 3000.0, "growth_m_per_s": 0.02},
+ {"id": "F0_1", "t0": 0.0, "x": 24000.0, "y": 6000.0, "r0": 3000.0, "growth_m_per_s": 0.02},
+
+
]
NEW_FIRE_EVENTS = [
# {"id": "F1", "t0": 100.0, "x": 5000.0, "y": 4500.0, "r0": 2000.0, "growth_m_per_s": 0.30},
# {"id": "F0_2", "t0": 50.0, "x": 15000.0, "y": 21000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
# {"id": "F0_3", "t0": 75.0, "x": 15000.0, "y": 15000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
- {"id": "F1", "t0": 25.0, "x": 20000.0, "y": 12000.0, "r0": 2000.0, "growth_m_per_s": 0.30},
- {"id": "F0_2", "t0": 30.0, "x": 18000.0, "y": 14000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
- {"id": "F0_3", "t0": 45.0, "x": 15000.0, "y": 18000.0, "r0": 3000.0, "growth_m_per_s": 0.20},
+ {"id": "F1_4", "t0": 90.0, "x": 20000.0, "y": 6000.0, "r0": 3000.0, "growth_m_per_s": 0.02},
+ {"id": "F1", "t0": 150.0, "x": 20000.0, "y": 12000.0, "r0": 2000.0, "growth_m_per_s": 0.02},
+ {"id": "F1_2", "t0": 210.0, "x": 18000.0, "y": 14000.0, "r0": 3000.0, "growth_m_per_s": 0.02},
+ {"id": "F1_3", "t0": 270.0, "x": 15000.0, "y": 18000.0, "r0": 3000.0, "growth_m_per_s": 0.02},
]
@@ -1377,6 +1386,7 @@ def _run_parameter_payload() -> Dict[str, Any]:
return {
"run_mode": RUN_MODE,
"scenario": SCENARIO_MODE,
+ "sim_end_time_s": SIM_END_TIME_S,
"sumo_binary": SUMO_BINARY,
"messaging_controls": {
"enabled": MESSAGING_ENABLED,
@@ -1552,6 +1562,7 @@ def _run_parameter_payload() -> Dict[str, Any]:
total_speed = 0
client = OpenAI() # uses OPENAI_API_KEY
+MAX_CONCURRENT_LLM = int(os.environ.get("MAX_CONCURRENT_LLM", "20"))
veh_last_choice: Dict[str, int] = {}
decision_round_counter = 0
agent_round_history: Dict[str, deque] = {}
@@ -1967,8 +1978,9 @@ def build_driver_briefing(
reasons.append(f"Route is {proximity_phrase}.")
reasons.append(f"Expected pace: {delay_phrase}.")
- briefing = f"{advisory}: route {passability}, {proximity_phrase}, {delay_phrase}."
+ briefing = f"Emergency management assessment — {advisory}: route is currently {passability}, {proximity_phrase}, {delay_phrase}."
return {
+ "guidance_source": "Emergency Operations Center",
"advisory": advisory,
"briefing": briefing,
"reasons": reasons,
@@ -1978,6 +1990,31 @@ def build_driver_briefing(
}
+def _decision_input_hash(
+ edge: str,
+ belief: Dict[str, Any],
+ inbox_len: int,
+ margin_m: Optional[float],
+ menu_utilities: Optional[tuple] = None,
+) -> int:
+ """Compute a hash of the key LLM decision inputs for cache-skip detection.
+
+ Rounded values prevent false misses from floating-point noise while still
+ detecting meaningful state changes.
+ """
+ key = (
+ edge,
+ round(float(belief.get("p_danger", 0)), 2),
+ round(float(belief.get("p_safe", 0)), 2),
+ round(float(belief.get("p_risky", 0)), 2),
+ belief.get("uncertainty_bucket"),
+ inbox_len,
+ round(float(margin_m or 0), 0),
+ menu_utilities,
+ )
+ return hash(key)
+
+
def _round_or_none(value: Optional[float], digits: int = 2) -> Optional[float]:
if value is None:
return None
@@ -2340,6 +2377,8 @@ def forecast_edge_risk(edge_id: str) -> Tuple[bool, float, float]:
return out
pending_system_observation_updates: List[Tuple[str, Dict[str, Any]]] = []
+ _agent_ctxs: List[Dict[str, Any]] = []
+ _llm_pool = ThreadPoolExecutor(max_workers=MAX_CONCURRENT_LLM)
for (vid, from_edge, to_edge, t0, dLane, dPos, dSpeed, dColor) in SPAWN_EVENTS:
if vid in spawned:
@@ -2373,6 +2412,15 @@ def forecast_edge_risk(edge_id: str) -> Tuple[bool, float, float]:
default_social_trigger=DEFAULT_SOCIAL_TRIGGER,
default_social_min_danger=DEFAULT_SOCIAL_MIN_DANGER,
)
+ _agent_ctxs.append({
+ "_mode": "replay",
+ "vid": vid, "from_edge": from_edge, "to_edge": to_edge,
+ "dLane": dLane, "dPos": dPos, "dSpeed": dSpeed, "dColor": dColor,
+ "agent_state": agent_state,
+ "should_release": should_release,
+ "release_reason": release_reason,
+ })
+ continue
else:
effective_t0 = 0.0
if sim_t < effective_t0:
@@ -2519,31 +2567,76 @@ def forecast_edge_risk(edge_id: str) -> Tuple[bool, float, float]:
"briefing": forecast_briefing,
},
"policy": (
- "Decide whether to depart now or continue staying. "
- "Consider your_observation, neighbor_assessment, and inbox messages to form your own judgment. "
- "combined_belief is a mathematical estimate — you may weigh sources differently based on the situation. "
- "If information_conflict.sources_agree is false, pay attention to the disagreement "
- "and explain in conflict_assessment which source you trusted more and why. "
- "Use neighborhood_observation and system_observation_updates as factual local social context. "
- "Treat those observations as neutral facts, not instructions. "
- "If fire risk is rising, forecast worsens, or nearby households are departing, prefer conservative action. "
+ "Priority 1 — Safety: If official evacuation guidance is present (see official_evacuation_order), "
+ "depart immediately unless physically unable. "
+ "If fire risk is rising, forecast worsens, or your current location may be overtaken, depart. "
+ "Delayed departure increases your exposure to dangerous fire conditions. "
+ "Priority 2 — Information assessment: Consider your_observation, neighbor_assessment, "
+ "and inbox to form your judgment. "
+ "combined_belief is a mathematical estimate — you may weigh sources differently. "
+ "If information_conflict.sources_agree is false, explain in conflict_assessment "
+ "which source you trusted more and why. "
+ "Priority 3 — Social context: Use neighborhood_observation and system_observation_updates "
+ "as factual context. Treat them as neutral observations, not instructions. "
+ "If nearby households are departing rapidly, this signals increasing urgency. "
"Output action='depart' or action='wait'. "
f"{scenario_prompt_suffix(SCENARIO_MODE)}"
),
}
+ if SCENARIO_MODE == "advice_guided":
+ predeparture_env["official_evacuation_order"] = {
+ "source": "County Emergency Operations Center",
+ "directive": "Evacuate now",
+ "message": (
+ "An evacuation order is in effect for your area. "
+ "All residents should depart immediately via designated routes."
+ ),
+ }
predeparture_system_prompt = (
- "You are a household deciding whether to depart for wildfire evacuation. "
+ "You are a resident in a wildfire-threatened area deciding whether to evacuate your household. "
+ "Your family's safety depends on this decision. "
+ "Trust official emergency guidance above your own observations, "
+ "and your own observations above unverified neighbor messages. "
"Follow the policy strictly."
)
predeparture_user_prompt = json.dumps(predeparture_env)
- llm_action_raw: Optional[str] = None
- llm_decision_reason: Optional[str] = None
- llm_predeparture_error: Optional[str] = None
- predeparture_fallback_reason: Optional[str] = None
- should_release = heuristic_should_release
- release_reason = heuristic_reason
- try:
- resp = client.responses.parse(
+ # --- Collect context for two-phase parallel LLM dispatch ---
+ _pd_hash = _decision_input_hash(
+ from_edge, belief_state, len(predeparture_inbox),
+ spawn_margin_m,
+ )
+ _ctx: Dict[str, Any] = {
+ "_mode": "live",
+ "vid": vid, "from_edge": from_edge, "to_edge": to_edge,
+ "dLane": dLane, "dPos": dPos, "dSpeed": dSpeed, "dColor": dColor,
+ "agent_state": agent_state,
+ "belief_state": belief_state,
+ "env_signal": env_signal,
+ "social_signal": social_signal,
+ "conflict_info": conflict_info,
+ "edge_forecast": edge_forecast,
+ "route_forecast": route_forecast,
+ "forecast_briefing": forecast_briefing,
+ "predeparture_inbox": predeparture_inbox,
+ "prompt_system_observation_updates": prompt_system_observation_updates,
+ "prompt_neighborhood_observation": prompt_neighborhood_observation,
+ "spawn_margin_m": spawn_margin_m,
+ "predeparture_system_prompt": predeparture_system_prompt,
+ "predeparture_user_prompt": predeparture_user_prompt,
+ "predeparture_env": predeparture_env,
+ "pd_hash": _pd_hash,
+ "heuristic_should_release": heuristic_should_release,
+ "heuristic_reason": heuristic_reason,
+ }
+ if (
+ agent_state.last_input_hash == _pd_hash
+ and agent_state.last_llm_action is not None
+ ):
+ _ctx["_cached"] = True
+ else:
+ _ctx["_cached"] = False
+ _ctx["_future"] = _llm_pool.submit(
+ client.responses.parse,
model=OPENAI_MODEL,
input=[
{"role": "system", "content": predeparture_system_prompt},
@@ -2551,69 +2644,135 @@ def forecast_edge_risk(edge_id: str) -> Tuple[bool, float, float]:
],
text_format=PreDepartureDecisionModel,
)
- predeparture_decision = resp.output_parsed
- llm_action_raw = str(getattr(predeparture_decision, "action", "") or "").strip().lower()
- llm_decision_reason = getattr(predeparture_decision, "reason", None)
+ _agent_ctxs.append(_ctx)
+ continue # defer processing to Phase 2 below
+
+ # ---- Phase 2: Wait for all LLM futures, then process results ----
+ _llm_pool.shutdown(wait=True)
+
+ for _ctx in _agent_ctxs:
+ vid = _ctx["vid"]
+ from_edge = _ctx["from_edge"]
+ to_edge = _ctx["to_edge"]
+ dLane = _ctx["dLane"]
+ dPos = _ctx["dPos"]
+ dSpeed = _ctx["dSpeed"]
+ dColor = _ctx["dColor"]
+ agent_state = _ctx["agent_state"]
+
+ if _ctx["_mode"] == "replay":
+ should_release = _ctx["should_release"]
+ release_reason = _ctx["release_reason"]
+ else:
+ # Record/live mode: process LLM result
+ belief_state = _ctx["belief_state"]
+ env_signal = _ctx["env_signal"]
+ social_signal = _ctx["social_signal"]
+ predeparture_system_prompt = _ctx["predeparture_system_prompt"]
+ predeparture_user_prompt = _ctx["predeparture_user_prompt"]
+ heuristic_reason = _ctx["heuristic_reason"]
+ predeparture_inbox = _ctx["predeparture_inbox"]
+ prompt_system_observation_updates = _ctx["prompt_system_observation_updates"]
+ prompt_neighborhood_observation = _ctx["prompt_neighborhood_observation"]
+ edge_forecast = _ctx["edge_forecast"]
+ route_forecast = _ctx["route_forecast"]
+ forecast_briefing = _ctx["forecast_briefing"]
+ conflict_info = _ctx["conflict_info"]
+
+ llm_action_raw: Optional[str] = None
+ llm_decision_reason: Optional[str] = None
+ llm_predeparture_error: Optional[str] = None
+ predeparture_fallback_reason: Optional[str] = None
+ should_release = _ctx["heuristic_should_release"]
+ release_reason = _ctx["heuristic_reason"]
+
+ if _ctx["_cached"]:
+ llm_action_raw = agent_state.last_llm_action
+ llm_decision_reason = agent_state.last_llm_reason
if llm_action_raw in {"depart", "leave", "depart_now"}:
should_release = True
- release_reason = "llm_depart"
- elif llm_action_raw in {"wait", "stay", "hold"}:
- should_release = False
- release_reason = "llm_wait"
+ release_reason = "llm_depart_cached"
else:
- raise ValueError(f"Unsupported predeparture action: {llm_action_raw!r}")
- llm_conflict_assessment = getattr(predeparture_decision, "conflict_assessment", None)
- if EVENTS_ENABLED:
- events.emit(
- "predeparture_llm_decision",
- summary=f"{vid} action={llm_action_raw}",
- veh_id=vid,
- action=llm_action_raw,
- reason=llm_decision_reason,
- conflict_assessment=llm_conflict_assessment,
- round=decision_round_counter,
- sim_t_s=sim_t,
- )
+ should_release = False
+ release_reason = "llm_wait_cached"
replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t,
- veh_id=vid,
- control_mode="predeparture",
- model=OPENAI_MODEL,
+ step=step_idx, sim_t_s=sim_t, veh_id=vid,
+ control_mode="predeparture", model=OPENAI_MODEL,
system_prompt=predeparture_system_prompt,
user_prompt=predeparture_user_prompt,
- response_text=getattr(resp, "output_text", None),
- parsed=predeparture_decision.model_dump()
- if hasattr(predeparture_decision, "model_dump")
- else None,
- error=None,
+ response_text=f"[cached] action={llm_action_raw}",
+ parsed=None, error=None,
)
- except Exception as e:
- llm_predeparture_error = str(e)
- predeparture_fallback_reason = "heuristic_predeparture_fallback"
- should_release = heuristic_should_release
- release_reason = heuristic_reason
- if EVENTS_ENABLED:
- events.emit(
- "predeparture_llm_error",
- summary=f"{vid} error={e}",
+ else:
+ try:
+ resp = _ctx["_future"].result(timeout=60)
+ predeparture_decision = resp.output_parsed
+ llm_action_raw = str(getattr(predeparture_decision, "action", "") or "").strip().lower()
+ llm_decision_reason = getattr(predeparture_decision, "reason", None)
+ if llm_action_raw in {"depart", "leave", "depart_now"}:
+ should_release = True
+ release_reason = "llm_depart"
+ elif llm_action_raw in {"wait", "stay", "hold"}:
+ should_release = False
+ release_reason = "llm_wait"
+ else:
+ raise ValueError(f"Unsupported predeparture action: {llm_action_raw!r}")
+ llm_conflict_assessment = getattr(predeparture_decision, "conflict_assessment", None)
+ if EVENTS_ENABLED:
+ events.emit(
+ "predeparture_llm_decision",
+ summary=f"{vid} action={llm_action_raw}",
+ veh_id=vid,
+ action=llm_action_raw,
+ reason=llm_decision_reason,
+ conflict_assessment=llm_conflict_assessment,
+ round=decision_round_counter,
+ sim_t_s=sim_t,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
+ sim_t_s=sim_t,
veh_id=vid,
- error=str(e),
- round=decision_round_counter,
+ control_mode="predeparture",
+ model=OPENAI_MODEL,
+ system_prompt=predeparture_system_prompt,
+ user_prompt=predeparture_user_prompt,
+ response_text=getattr(resp, "output_text", None),
+ parsed=predeparture_decision.model_dump()
+ if hasattr(predeparture_decision, "model_dump")
+ else None,
+ error=None,
+ )
+ except Exception as e:
+ llm_predeparture_error = str(e)
+ predeparture_fallback_reason = "heuristic_predeparture_fallback"
+ should_release = _ctx["heuristic_should_release"]
+ release_reason = _ctx["heuristic_reason"]
+ if EVENTS_ENABLED:
+ events.emit(
+ "predeparture_llm_error",
+ summary=f"{vid} error={e}",
+ veh_id=vid,
+ error=str(e),
+ round=decision_round_counter,
+ sim_t_s=sim_t,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
sim_t_s=sim_t,
+ veh_id=vid,
+ control_mode="predeparture",
+ model=OPENAI_MODEL,
+ system_prompt=predeparture_system_prompt,
+ user_prompt=predeparture_user_prompt,
+ response_text=None,
+ parsed=None,
+ error=str(e),
)
- replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t,
- veh_id=vid,
- control_mode="predeparture",
- model=OPENAI_MODEL,
- system_prompt=predeparture_system_prompt,
- user_prompt=predeparture_user_prompt,
- response_text=None,
- parsed=None,
- error=str(e),
- )
+ agent_state.last_input_hash = _ctx["pd_hash"]
+ agent_state.last_llm_action = llm_action_raw
+ agent_state.last_llm_reason = llm_decision_reason
+
replay.record_agent_cognition(
step=step_idx,
sim_t_s=sim_t,
@@ -3255,20 +3414,32 @@ def record_agent_memory(
belief=belief_state,
psychology=agent_state.psychology,
profile=agent_state.profile,
+ scenario=SCENARIO_MODE,
)
prompt_destination_menu = filter_menu_for_scenario(
SCENARIO_MODE,
menu,
control_mode="destination",
)
- utility_policy = (
- "Use expected_utility as the main safety-efficiency tradeoff score; higher is better. "
- if SCENARIO_CONFIG["expected_utility_visible"]
- else "Do not assume a precomputed utility score is available in this scenario. "
- )
+ _utility_basis = {
+ "no_notice": (
+ "expected_utility is available for all options; higher (less negative) is better. "
+ "Scores reflect your general hazard perception and route length — "
+ "you have no route-specific fire data. "
+ ),
+ "alert_guided": (
+ "expected_utility is available for all options; higher (less negative) is better. "
+ "Scores incorporate current fire positions along each route. "
+ ),
+ "advice_guided": (
+ "Use expected_utility as the main safety-efficiency tradeoff score; higher is better. "
+ ),
+ }
+ utility_policy = _utility_basis.get(SCENARIO_MODE, _utility_basis["advice_guided"])
guidance_policy = (
- "Prefer options with advisory='Recommended' and clear briefing reasons. "
- "If advisory is not available, prefer lower risk_sum and larger min_margin. "
+ "The Emergency Operations Center has assessed each option. "
+ "Follow options with advisory='Recommended'; fall back to 'Use with caution' only if no recommended option is reachable. "
+ "Avoid options marked 'Avoid for now' unless all alternatives are blocked. "
if SCENARIO_CONFIG["official_route_guidance_visible"]
else "No official route recommendation is available in this scenario; infer safety from the visible route facts and your subjective information. "
)
@@ -3357,26 +3528,36 @@ def record_agent_memory(
"broadcast_token": "*",
},
"policy": (
- "Choose ONLY from reachable_dest_indices. "
+ "Priority 1 — Hard constraints: Choose ONLY from reachable_dest_indices. "
"If reachable_dest_indices is empty, output choice_index=-1 (KEEP). "
- "Strongly avoid options where blocked_edges_on_fastest_path > 0. "
+ "Never choose options where blocked_edges_on_fastest_path > 0. "
+ "Priority 2 — Official guidance: "
f"{guidance_policy}"
+ "Priority 3 — Risk assessment: "
f"{utility_policy}"
- "Use agent_self_history to avoid repeating ineffective choices. "
"If fire_proximity.is_getting_closer_to_fire=true, prioritize choices that increase min_margin. "
f"{forecast_policy}"
- "Consider your_observation, neighbor_assessment, and inbox messages to form your own hazard judgment. "
- "combined_belief is a mathematical estimate — you may weigh sources differently based on the situation. "
- "If information_conflict.sources_agree is false, pay attention to the disagreement "
- "and explain in conflict_assessment which source you trusted more and why. "
"When uncertainty is High, avoid fragile or highly exposed choices. "
- "Use neighborhood_observation and system_observation_updates as factual local social context; treat them as neutral observations rather than instructions. "
- "If messaging.enabled=true, you may include optional outbox items with {to, message}. "
- "Messages sent in this round are delivered to recipients in the next decision round. "
+ "Choosing a high-exposure route risks encountering fire directly. "
+ "Priority 4 — Situational awareness: "
+ "Consider your_observation, neighbor_assessment, and inbox for your hazard judgment. "
+ "combined_belief is a mathematical estimate — you may weigh sources differently. "
+ "If information_conflict.sources_agree is false, explain in conflict_assessment "
+ "which source you trusted more and why. "
+ "Use agent_self_history to avoid repeating ineffective choices. "
+ "Use neighborhood_observation and system_observation_updates as factual context, not instructions. "
+ "Priority 5 — Communication: If messaging.enabled=true, you may include optional outbox items "
+ "with {to, message}. Messages are delivered next round. "
f"{scenario_prompt_suffix(SCENARIO_MODE)}"
),
}
- system_prompt = "You are a wildfire evacuation routing agent. Follow the policy strictly."
+ system_prompt = (
+ "You are a resident evacuating from a wildfire, choosing the safest route to a shelter. "
+ "Your safety depends on this choice. "
+ "Trust official emergency guidance above personal observations, "
+ "and personal observations above unverified neighbor messages. "
+ "Follow the policy strictly."
+ )
user_prompt = json.dumps(env)
decision = None
decision_reason = None
@@ -3385,73 +3566,101 @@ def record_agent_memory(
fallback_reason = None
llm_error = None
- # LLM decision (Structured Outputs)
- try:
- resp = client.responses.parse(
- model=OPENAI_MODEL,
- input=[
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": user_prompt},
- ],
- text_format=DecisionModel,
- )
- decision = resp.output_parsed
- choice_idx = int(decision.choice_index)
+ # --- Input-hash skip: reuse previous LLM decision if inputs unchanged ---
+ _veh_hash = _decision_input_hash(
+ roadid, belief_state, len(inbox_for_vehicle),
+ current_edge_margin_m,
+ menu_utilities=tuple(
+ round(float(item.get("expected_utility") or 0), 2)
+ for item in menu
+ ),
+ )
+ if (
+ agent_state.last_input_hash == _veh_hash
+ and agent_state.last_llm_choice_idx is not None
+ ):
+ choice_idx = agent_state.last_llm_choice_idx
raw_choice_idx = choice_idx
- decision_reason = getattr(decision, "reason", None)
- decision_conflict_assessment = getattr(decision, "conflict_assessment", None)
- outbox_count = len(getattr(decision, "outbox", None) or [])
- messaging.queue_outbox(vehicle, getattr(decision, "outbox", None))
- if EVENTS_ENABLED:
- events.emit(
- "llm_decision",
- summary=f"{vehicle} choice={choice_idx} outbox={outbox_count}",
- veh_id=vehicle,
- choice_idx=choice_idx,
- reason=decision_reason,
- conflict_assessment=decision_conflict_assessment,
- outbox_count=outbox_count,
- round=decision_round,
- sim_t_s=sim_t_s,
- )
+ decision_reason = agent_state.last_llm_reason
+ fallback_reason = "cached"
replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t_s,
- veh_id=vehicle,
- control_mode=CONTROL_MODE,
- model=OPENAI_MODEL,
- system_prompt=system_prompt,
- user_prompt=user_prompt,
- response_text=getattr(resp, "output_text", None),
- parsed=decision.model_dump() if hasattr(decision, "model_dump") else None,
- error=None,
+ step=step_idx, sim_t_s=sim_t_s, veh_id=vehicle,
+ control_mode=CONTROL_MODE, model=OPENAI_MODEL,
+ system_prompt=system_prompt, user_prompt=user_prompt,
+ response_text=f"[cached] choice_index={choice_idx}",
+ parsed=None, error=None,
)
- except Exception as e:
- print(f"[WARN] LLM decision failed for {vehicle}: {e}")
- llm_error = str(e)
- fallback_reason = "llm_error"
- if EVENTS_ENABLED:
- events.emit(
- "llm_error",
- summary=f"{vehicle} error={e}",
+ else:
+ # LLM decision (Structured Outputs)
+ try:
+ resp = client.responses.parse(
+ model=OPENAI_MODEL,
+ input=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_prompt},
+ ],
+ text_format=DecisionModel,
+ )
+ decision = resp.output_parsed
+ choice_idx = int(decision.choice_index)
+ raw_choice_idx = choice_idx
+ decision_reason = getattr(decision, "reason", None)
+ decision_conflict_assessment = getattr(decision, "conflict_assessment", None)
+ outbox_count = len(getattr(decision, "outbox", None) or [])
+ messaging.queue_outbox(vehicle, getattr(decision, "outbox", None))
+ if EVENTS_ENABLED:
+ events.emit(
+ "llm_decision",
+ summary=f"{vehicle} choice={choice_idx} outbox={outbox_count}",
+ veh_id=vehicle,
+ choice_idx=choice_idx,
+ reason=decision_reason,
+ conflict_assessment=decision_conflict_assessment,
+ outbox_count=outbox_count,
+ round=decision_round,
+ sim_t_s=sim_t_s,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
+ sim_t_s=sim_t_s,
veh_id=vehicle,
- error=str(e),
- round=decision_round,
+ control_mode=CONTROL_MODE,
+ model=OPENAI_MODEL,
+ system_prompt=system_prompt,
+ user_prompt=user_prompt,
+ response_text=getattr(resp, "output_text", None),
+ parsed=decision.model_dump() if hasattr(decision, "model_dump") else None,
+ error=None,
+ )
+ except Exception as e:
+ print(f"[WARN] LLM decision failed for {vehicle}: {e}")
+ llm_error = str(e)
+ fallback_reason = "llm_error"
+ if EVENTS_ENABLED:
+ events.emit(
+ "llm_error",
+ summary=f"{vehicle} error={e}",
+ veh_id=vehicle,
+ error=str(e),
+ round=decision_round,
+ sim_t_s=sim_t_s,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
sim_t_s=sim_t_s,
+ veh_id=vehicle,
+ control_mode=CONTROL_MODE,
+ model=OPENAI_MODEL,
+ system_prompt=system_prompt,
+ user_prompt=user_prompt,
+ response_text=None,
+ parsed=None,
+ error=str(e),
)
- replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t_s,
- veh_id=vehicle,
- control_mode=CONTROL_MODE,
- model=OPENAI_MODEL,
- system_prompt=system_prompt,
- user_prompt=user_prompt,
- response_text=None,
- parsed=None,
- error=str(e),
- )
- choice_idx = -2 # trigger fallback
+ choice_idx = -2 # trigger fallback
+ agent_state.last_input_hash = _veh_hash
+ agent_state.last_llm_choice_idx = choice_idx
+ agent_state.last_llm_reason = decision_reason
# Handle KEEP
if choice_idx == -1:
@@ -3648,19 +3857,32 @@ def record_agent_memory(
belief=belief_state,
psychology=agent_state.psychology,
profile=agent_state.profile,
+ scenario=SCENARIO_MODE,
)
prompt_route_menu = filter_menu_for_scenario(
SCENARIO_MODE,
menu,
control_mode="route",
)
- utility_policy = (
- "Use expected_utility as the main safety-efficiency tradeoff score; higher is better. "
- if SCENARIO_CONFIG["expected_utility_visible"]
- else "Do not assume a precomputed utility score is available in this scenario. "
- )
+ _rt_utility_basis = {
+ "no_notice": (
+ "expected_utility is available for all options; higher (less negative) is better. "
+ "Scores reflect your general hazard perception and route length — "
+ "you have no route-specific fire data. "
+ ),
+ "alert_guided": (
+ "expected_utility is available for all options; higher (less negative) is better. "
+ "Scores incorporate current fire positions along each route. "
+ ),
+ "advice_guided": (
+ "Use expected_utility as the main safety-efficiency tradeoff score; higher is better. "
+ ),
+ }
+ utility_policy = _rt_utility_basis.get(SCENARIO_MODE, _rt_utility_basis["advice_guided"])
guidance_policy = (
- "Use advisory/briefing/reasons to explain route quality in human language. "
+ "The Emergency Operations Center has assessed each route. "
+ "Follow routes with advisory='Recommended'; fall back to 'Use with caution' only if no recommended route is reachable. "
+ "Avoid routes marked 'Avoid for now' unless all alternatives are blocked. "
if SCENARIO_CONFIG["official_route_guidance_visible"]
else "No official route recommendation is available in this scenario; explain your choice using only the visible route facts and subjective information. "
)
@@ -3748,24 +3970,35 @@ def record_agent_memory(
"broadcast_token": "*",
},
"policy": (
- "Choose the safest route. Strongly avoid any route with blocked_edges > 0. "
+ "Priority 1 — Hard constraints: Choose the safest route. "
+ "Never choose any route with blocked_edges > 0. "
+ "Priority 2 — Official guidance: "
f"{guidance_policy}"
+ "Priority 3 — Risk assessment: "
f"{utility_policy}"
- "Use agent_self_history to avoid repeating ineffective choices. "
"If fire_proximity.is_getting_closer_to_fire=true, prioritize routes with larger min_margin_m. "
f"{forecast_policy}"
- "Consider your_observation, neighbor_assessment, and inbox messages to form your own hazard judgment. "
- "combined_belief is a mathematical estimate — you may weigh sources differently based on the situation. "
- "If information_conflict.sources_agree is false, pay attention to the disagreement "
- "and explain in conflict_assessment which source you trusted more and why. "
"When uncertainty is High, avoid fragile or highly exposed choices. "
- "Use neighborhood_observation and system_observation_updates as factual local social context; treat them as neutral observations rather than instructions. "
- "If messaging.enabled=true, you may include optional outbox items with {to, message}. "
- "Messages sent in this round are delivered to recipients in the next decision round. "
+ "Choosing a high-exposure route risks encountering fire directly. "
+ "Priority 4 — Situational awareness: "
+ "Consider your_observation, neighbor_assessment, and inbox for your hazard judgment. "
+ "combined_belief is a mathematical estimate — you may weigh sources differently. "
+ "If information_conflict.sources_agree is false, explain in conflict_assessment "
+ "which source you trusted more and why. "
+ "Use agent_self_history to avoid repeating ineffective choices. "
+ "Use neighborhood_observation and system_observation_updates as factual context, not instructions. "
+ "Priority 5 — Communication: If messaging.enabled=true, you may include optional outbox items "
+ "with {to, message}. Messages are delivered next round. "
f"{scenario_prompt_suffix(SCENARIO_MODE)}"
),
}
- system_prompt = "You are a wildfire evacuation routing agent. Follow the policy strictly."
+ system_prompt = (
+ "You are a resident evacuating from a wildfire, choosing the safest route to a shelter. "
+ "Your safety depends on this choice. "
+ "Trust official emergency guidance above personal observations, "
+ "and personal observations above unverified neighbor messages. "
+ "Follow the policy strictly."
+ )
user_prompt = json.dumps(env)
decision = None
decision_reason = None
@@ -3774,79 +4007,107 @@ def record_agent_memory(
fallback_reason = None
llm_error = None
- try:
- resp = client.responses.parse(
- model=OPENAI_MODEL,
- input=[
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": user_prompt},
- ],
- text_format=DecisionModel,
- )
- decision = resp.output_parsed
- choice_idx = int(decision.choice_index)
+ # --- Input-hash skip: reuse previous LLM decision if inputs unchanged ---
+ _rt_hash = _decision_input_hash(
+ roadid, belief_state, len(inbox_for_vehicle),
+ current_edge_margin_m,
+ menu_utilities=tuple(
+ round(float(item.get("expected_utility") or 0), 2)
+ for item in menu
+ ),
+ )
+ if (
+ agent_state.last_input_hash == _rt_hash
+ and agent_state.last_llm_choice_idx is not None
+ ):
+ choice_idx = agent_state.last_llm_choice_idx
raw_choice_idx = choice_idx
- decision_reason = getattr(decision, "reason", None)
- decision_conflict_assessment = getattr(decision, "conflict_assessment", None)
- outbox_count = len(getattr(decision, "outbox", None) or [])
- messaging.queue_outbox(vehicle, getattr(decision, "outbox", None))
- if EVENTS_ENABLED:
- events.emit(
- "llm_decision",
- summary=f"{vehicle} choice={choice_idx} outbox={outbox_count}",
- veh_id=vehicle,
- choice_idx=choice_idx,
- reason=decision_reason,
- conflict_assessment=decision_conflict_assessment,
- outbox_count=outbox_count,
- round=decision_round,
- sim_t_s=sim_t_s,
- )
+ decision_reason = agent_state.last_llm_reason
+ fallback_reason = "cached"
replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t_s,
- veh_id=vehicle,
- control_mode=CONTROL_MODE,
- model=OPENAI_MODEL,
- system_prompt=system_prompt,
- user_prompt=user_prompt,
- response_text=getattr(resp, "output_text", None),
- parsed=decision.model_dump() if hasattr(decision, "model_dump") else None,
- error=None,
+ step=step_idx, sim_t_s=sim_t_s, veh_id=vehicle,
+ control_mode=CONTROL_MODE, model=OPENAI_MODEL,
+ system_prompt=system_prompt, user_prompt=user_prompt,
+ response_text=f"[cached] choice_index={choice_idx}",
+ parsed=None, error=None,
)
- except Exception as e:
- print(f"[WARN] LLM decision failed for {vehicle}: {e}")
- llm_error = str(e)
- fallback_reason = "llm_error"
- if EVENTS_ENABLED:
- events.emit(
- "llm_error",
- summary=f"{vehicle} error={e}",
- veh_id=vehicle,
- error=str(e),
- round=decision_round,
+ else:
+ try:
+ resp = client.responses.parse(
+ model=OPENAI_MODEL,
+ input=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_prompt},
+ ],
+ text_format=DecisionModel,
+ )
+ decision = resp.output_parsed
+ choice_idx = int(decision.choice_index)
+ raw_choice_idx = choice_idx
+ decision_reason = getattr(decision, "reason", None)
+ decision_conflict_assessment = getattr(decision, "conflict_assessment", None)
+ outbox_count = len(getattr(decision, "outbox", None) or [])
+ messaging.queue_outbox(vehicle, getattr(decision, "outbox", None))
+ if EVENTS_ENABLED:
+ events.emit(
+ "llm_decision",
+ summary=f"{vehicle} choice={choice_idx} outbox={outbox_count}",
+ veh_id=vehicle,
+ choice_idx=choice_idx,
+ reason=decision_reason,
+ conflict_assessment=decision_conflict_assessment,
+ outbox_count=outbox_count,
+ round=decision_round,
+ sim_t_s=sim_t_s,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
sim_t_s=sim_t_s,
+ veh_id=vehicle,
+ control_mode=CONTROL_MODE,
+ model=OPENAI_MODEL,
+ system_prompt=system_prompt,
+ user_prompt=user_prompt,
+ response_text=getattr(resp, "output_text", None),
+ parsed=decision.model_dump() if hasattr(decision, "model_dump") else None,
+ error=None,
)
- replay.record_llm_dialog(
- step=step_idx,
- sim_t_s=sim_t_s,
- veh_id=vehicle,
- control_mode=CONTROL_MODE,
- model=OPENAI_MODEL,
- system_prompt=system_prompt,
- user_prompt=user_prompt,
- response_text=None,
- parsed=None,
- error=str(e),
- )
- choice_idx = sorted(
- range(len(menu)),
- key=lambda i: (
- -float(menu[i].get("expected_utility", -10**9)),
- menu[i]["blocked_edges"],
- menu[i]["risk_sum"],
+ except Exception as e:
+ print(f"[WARN] LLM decision failed for {vehicle}: {e}")
+ llm_error = str(e)
+ fallback_reason = "llm_error"
+ if EVENTS_ENABLED:
+ events.emit(
+ "llm_error",
+ summary=f"{vehicle} error={e}",
+ veh_id=vehicle,
+ error=str(e),
+ round=decision_round,
+ sim_t_s=sim_t_s,
+ )
+ replay.record_llm_dialog(
+ step=step_idx,
+ sim_t_s=sim_t_s,
+ veh_id=vehicle,
+ control_mode=CONTROL_MODE,
+ model=OPENAI_MODEL,
+ system_prompt=system_prompt,
+ user_prompt=user_prompt,
+ response_text=None,
+ parsed=None,
+ error=str(e),
)
- )[0]
+ choice_idx = sorted(
+ range(len(menu)),
+ key=lambda i: (
+ -float(menu[i].get("expected_utility", -10**9)),
+ menu[i]["blocked_edges"],
+ menu[i]["risk_sum"],
+ )
+ )[0]
+ agent_state.last_input_hash = _rt_hash
+ agent_state.last_llm_choice_idx = choice_idx
+ agent_state.last_llm_reason = decision_reason
selected_item = next((x for x in menu if x.get("idx") == choice_idx), None)
if OVERLAYS_ENABLED:
@@ -4076,11 +4337,12 @@ def update_fire_shapes(sim_t_s: float):
# =========================
-# Step 8: Take simulation steps until there are no more vehicles in the network
+# Step 8: Take simulation steps until sim end time is reached
# =========================
step_idx = 0
+print(f"[SIM] Simulation will run until t={SIM_END_TIME_S:.0f}s (--sim-end-time / SIM_END_TIME_S)")
try:
- while traci.simulation.getMinExpectedNumber() > 0:
+ while traci.simulation.getTime() < SIM_END_TIME_S:
traci.simulationStep()
step_idx += 1
# --- NEW: visualize fire spread each step (or each decision round if you prefer) ---
@@ -4104,27 +4366,33 @@ def update_fire_shapes(sim_t_s: float):
)
active_vehicle_ids = list(traci.vehicle.getIDList())
_refresh_active_agent_live_status(sim_t, active_vehicle_ids)
- fires = active_fires(sim_t)
- fire_geom = [(float(item["x"]), float(item["y"]), float(item["r"])) for item in fires]
- for vid in active_vehicle_ids:
- try:
- roadid = traci.vehicle.getRoadID(vid)
- if not roadid or roadid.startswith(":"):
- continue
- _, risk_score, margin_m = compute_edge_risk_for_fires(roadid, fire_geom)
- metrics.record_exposure_sample(
- agent_id=vid,
- sim_t_s=sim_t,
- current_edge=roadid,
- current_margin_m=_round_or_none(margin_m, 2),
- risk_score=risk_score,
- )
- except traci.TraCIException:
- continue
metrics.observe_active_vehicles(active_vehicle_ids, sim_t)
+ # Early termination: stop when all agents arrived at their destination
+ if len(spawned) == len(SPAWN_EVENTS) and metrics.arrived_count() == len(SPAWN_EVENTS):
+ print(f"[SIM] All {len(SPAWN_EVENTS)} agents arrived at destination by t={sim_t:.1f}s — ending early.")
+ break
delta_t = traci.simulation.getDeltaT()
decision_period_steps = max(1, int(round(DECISION_PERIOD_S / max(1e-9, delta_t))))
if step_idx % decision_period_steps == 0:
+ # Record exposure once per decision round (not every step) to avoid
+ # diluting the average with many low-risk samples between rounds.
+ fires = active_fires(sim_t)
+ fire_geom = [(float(item["x"]), float(item["y"]), float(item["r"])) for item in fires]
+ for vid in active_vehicle_ids:
+ try:
+ roadid = traci.vehicle.getRoadID(vid)
+ if not roadid or roadid.startswith(":"):
+ continue
+ _, risk_score, margin_m = compute_edge_risk_for_fires(roadid, fire_geom)
+ metrics.record_exposure_sample(
+ agent_id=vid,
+ sim_t_s=sim_t,
+ current_edge=roadid,
+ current_margin_m=_round_or_none(margin_m, 2),
+ risk_score=risk_score,
+ )
+ except traci.TraCIException:
+ continue
replay.record_metric_snapshot(
step=step_idx,
sim_t_s=sim_t,
diff --git a/scripts/run_rq1_info_quality.sh b/scripts/run_rq1_info_quality.sh
new file mode 100755
index 0000000..f57992c
--- /dev/null
+++ b/scripts/run_rq1_info_quality.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# ==============================================================================
+# RQ1: Information Quality → Departure Timing, Route Choice, Decision Instability
+#
+# Research question:
+# How do information quality factors (observation noise σ_info, information
+# delay, and conflicting signals) affect departure timing, route-choice
+# distribution, and decision instability?
+#
+# Design:
+# IV1: INFO_SIGMA = {0, 20, 40, 80} (observation noise)
+# IV2: INFO_DELAY_S = {0, 15, 30, 60} (information delay)
+# Moderator: scenario = {no_notice, alert_guided, advice_guided}
+# Messaging: on (fixed — required for signal conflict to emerge)
+# theta_trust: 0.5 (fixed — social channel held neutral)
+# Population: homogeneous (all spreads = 0)
+# Seeds: 5 (stochastic replication)
+#
+# Grid: 4 sigma × 4 delay × 3 scenario × 5 seeds = 240 runs
+#
+# Primary DVs:
+# - departure_time_variability
+# - route_choice_entropy
+# - decision_instability (average_changes, max_changes)
+# - average_signal_conflict (JSD — measures conflicting signals)
+# - destination_choice_share
+# - departed_agents, arrived_agents
+# ==============================================================================
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
+cd "$ROOT_DIR"
+
+SEEDS=(12345 12346 12347 12348 12349)
+
+for seed in "${SEEDS[@]}"; do
+ echo "============================================"
+ echo "[RQ1] seed=${seed}"
+ echo "============================================"
+ SUMO_SEED="$seed" python3 -m agentevac.analysis.experiments \
+ --output-dir "outputs/rq1/info_quality_seed_${seed}" \
+ --sumo-binary sumo \
+ --sigma-values 0,20,40,80 \
+ --delay-values 0,15,30,60 \
+ --trust-values 0.5 \
+ --scenario-values no_notice,alert_guided,advice_guided \
+ --messaging on
+done
+
+echo "[RQ1] All seeds complete."
diff --git a/scripts/run_rq2_social_trust.sh b/scripts/run_rq2_social_trust.sh
new file mode 100755
index 0000000..6f82895
--- /dev/null
+++ b/scripts/run_rq2_social_trust.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+# ==============================================================================
+# RQ2: Social Cues × Trust → Cascades, Herding, Belief Dynamics
+#
+# Research question:
+# Given fixed information quality, how do social cues and trust (θ_trust)
+# interact to shape collective departure cascades, herding in route/
+# destination choice, and changes in decision instability and belief
+# uncertainty over time?
+#
+# Design:
+# IV1: DEFAULT_THETA_TRUST = {0.0, 0.25, 0.5, 0.75, 1.0}
+# IV2: messaging = {on, off}
+# Moderator: scenario = {no_notice, alert_guided, advice_guided}
+# INFO_SIGMA: 40 (fixed — moderate noise)
+# INFO_DELAY_S: 30 (fixed — moderate delay)
+# Population: homogeneous (all spreads = 0)
+# Seeds: 5 (stochastic replication)
+#
+# Grid: 5 trust × 2 messaging × 3 scenario × 5 seeds = 150 runs
+#
+# Primary DVs (from metrics JSON):
+# - departure_time_variability (cascade synchronization)
+# - route_choice_entropy (herding)
+# - decision_instability (flip-flopping)
+# - average_signal_conflict (env vs. social disagreement)
+# - destination_choice_share (which destinations attract herds)
+# - departed_agents, arrived_agents
+#
+# Time-series DVs (post-process from replay JSONL):
+# - belief entropy over time (agent_cognition events)
+# - confidence over time (agent_cognition events)
+# - departure reason counts (departure_release events)
+# - cascade chain analysis (departure_release reason=neighbor_departure_activity)
+# ==============================================================================
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
+cd "$ROOT_DIR"
+
+SEEDS=(12345 12346 12347 12348 12349)
+
+for messaging in on off; do
+ for seed in "${SEEDS[@]}"; do
+ echo "============================================"
+ echo "[RQ2] messaging=${messaging} seed=${seed}"
+ echo "============================================"
+ SUMO_SEED="$seed" python3 -m agentevac.analysis.experiments \
+ --output-dir "outputs/rq2/social_trust_msg_${messaging}_seed_${seed}" \
+ --sumo-binary sumo \
+ --sigma-values 40 \
+ --delay-values 30 \
+ --trust-values 0.0,0.25,0.5,0.75,1.0 \
+ --scenario-values no_notice,alert_guided,advice_guided \
+ --messaging "$messaging"
+ done
+done
+
+echo "[RQ2] All conditions complete."
diff --git a/scripts/run_rq3_pareto.sh b/scripts/run_rq3_pareto.sh
new file mode 100755
index 0000000..375e1c7
--- /dev/null
+++ b/scripts/run_rq3_pareto.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+# ==============================================================================
+# RQ3: Safety–Efficiency Pareto Frontier Across Alerting Regimes
+#
+# Research question:
+# How does the safety-efficiency trade-off (average hazard exposure E_avg
+# vs average travel time T_avg) change across combinations of information
+# quality, delay, and trust? How does the resulting Pareto frontier shift
+# under the three alerting regimes (no-notice, alert-guided, advice-guided)?
+#
+# Design:
+# IV1: INFO_SIGMA = {20, 40, 80}
+# IV2: INFO_DELAY_S = {0, 30, 60}
+# IV3: DEFAULT_THETA_TRUST = {0.25, 0.5, 0.75}
+# Grouping: scenario = {no_notice, alert_guided, advice_guided}
+# Messaging: on (fixed)
+# Population: homogeneous (all spreads = 0)
+# Seeds: 5 (stochastic replication)
+#
+# Grid: 3 sigma × 3 delay × 3 trust × 3 scenario × 5 seeds = 405 runs
+#
+# Primary DVs:
+# - average_hazard_exposure (E_avg — safety axis)
+# - average_travel_time (T_avg — efficiency axis)
+# - arrived_agents (completion filter)
+#
+# Per-agent DVs (for equity analysis):
+# - per_agent hazard exposure
+# - per_agent travel time
+#
+# Analysis-level metrics (computed post-hoc):
+# - Pareto frontier per scenario
+# - Hypervolume indicator per scenario
+# - Frontier shift between scenarios
+# ==============================================================================
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
+cd "$ROOT_DIR"
+
+SEEDS=(12345 12346 12347 12348 12349)
+
+for seed in "${SEEDS[@]}"; do
+ echo "============================================"
+ echo "[RQ3] seed=${seed}"
+ echo "============================================"
+ SUMO_SEED="$seed" python3 -m agentevac.analysis.experiments \
+ --output-dir "outputs/rq3/pareto_seed_${seed}" \
+ --sumo-binary sumo \
+ --sigma-values 20,40,80 \
+ --delay-values 0,30,60 \
+ --trust-values 0.25,0.5,0.75 \
+ --scenario-values no_notice,alert_guided,advice_guided \
+ --messaging on
+done
+
+echo "[RQ3] All seeds complete."
diff --git a/scripts/run_rq4_heterogeneity.sh b/scripts/run_rq4_heterogeneity.sh
new file mode 100755
index 0000000..a923262
--- /dev/null
+++ b/scripts/run_rq4_heterogeneity.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+# ==============================================================================
+# RQ4: Population Heterogeneity — Diversity as Resilience or Fragility
+#
+# Research question:
+# Does heterogeneity in risk tolerance and decision weights improve
+# system-level evacuation resilience, and does the optimal diversity
+# level depend on the information regime?
+#
+# Design:
+# IV1: spread_level = {none, low, moderate, high}
+# Moderator: scenario = {no_notice, alert_guided, advice_guided}
+# INFO_SIGMA: 40 (fixed — moderate noise)
+# INFO_DELAY_S: 30 (fixed — moderate delay)
+# theta_trust mean: 0.5 (fixed)
+# Messaging: on (fixed — social channel needed for canary cascade)
+# Seeds: 10 (more seeds for stochastic spread effects)
+#
+# Spread levels (std-dev of truncated normal around population means):
+# ┌──────────────┬──────┬──────┬──────────┬──────┐
+# │ Parameter │ none │ low │ moderate │ high │
+# ├──────────────┼──────┼──────┼──────────┼──────┤
+# │ theta_trust │ 0.0 │ 0.05 │ 0.12 │ 0.20 │
+# │ theta_r │ 0.0 │ 0.03 │ 0.08 │ 0.15 │
+# │ theta_u │ 0.0 │ 0.03 │ 0.08 │ 0.15 │
+# │ gamma │ 0.0 │ 0.001│ 0.003 │ 0.005│
+# │ lambda_e │ 0.0 │ 0.15 │ 0.4 │ 0.8 │
+# │ lambda_t │ 0.0 │ 0.03 │ 0.08 │ 0.15 │
+# └──────────────┴──────┴──────┴──────────┴──────┘
+#
+# Grid: 4 spread × 3 scenario × 10 seeds = 120 runs
+#
+# Primary DVs:
+# - arrived_agents, average_hazard_exposure, average_travel_time
+# - departure_time_variability, route_choice_entropy, decision_instability
+#
+# Per-agent DVs (for equity / Gini analysis):
+# - per_agent hazard exposure, per_agent travel time
+#
+# Time-series DVs (post-process from replay JSONL):
+# - departure reason distribution (canary cascade detection)
+# ==============================================================================
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
+cd "$ROOT_DIR"
+
+SEEDS=(12345 12346 12347 12348 12349 12350 12351 12352 12353 12354)
+
+# Spread level definitions: (label, theta_trust, theta_r, theta_u, gamma, lambda_e, lambda_t)
+SPREAD_LABELS=( "none" "low" "moderate" "high" )
+SPREAD_TRUST=( "0.0" "0.05" "0.12" "0.20" )
+SPREAD_TR=( "0.0" "0.03" "0.08" "0.15" )
+SPREAD_TU=( "0.0" "0.03" "0.08" "0.15" )
+SPREAD_GAMMA=( "0.0" "0.001" "0.003" "0.005")
+SPREAD_LE=( "0.0" "0.15" "0.4" "0.8" )
+SPREAD_LT=( "0.0" "0.03" "0.08" "0.15" )
+
+for i in "${!SPREAD_LABELS[@]}"; do
+ spread="${SPREAD_LABELS[$i]}"
+ for seed in "${SEEDS[@]}"; do
+ echo "============================================"
+ echo "[RQ4] spread=${spread} seed=${seed}"
+ echo "============================================"
+ SUMO_SEED="$seed" \
+ THETA_TRUST_SPREAD="${SPREAD_TRUST[$i]}" \
+ THETA_R_SPREAD="${SPREAD_TR[$i]}" \
+ THETA_U_SPREAD="${SPREAD_TU[$i]}" \
+ GAMMA_SPREAD="${SPREAD_GAMMA[$i]}" \
+ LAMBDA_E_SPREAD="${SPREAD_LE[$i]}" \
+ LAMBDA_T_SPREAD="${SPREAD_LT[$i]}" \
+ python3 -m agentevac.analysis.experiments \
+ --output-dir "outputs/rq4/heterogeneity_spread_${spread}_seed_${seed}" \
+ --sumo-binary sumo \
+ --sigma-values 40 \
+ --delay-values 30 \
+ --trust-values 0.5 \
+ --scenario-values no_notice,alert_guided,advice_guided \
+ --messaging on
+ done
+done
+
+echo "[RQ4] All conditions complete."
diff --git a/sumo/Repaired.net.xml b/sumo/Repaired.net.xml
new file mode 100644
index 0000000..55e28ab
--- /dev/null
+++ b/sumo/Repaired.net.xml
@@ -0,0 +1,7446 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/sumo/Repaired.netecfg b/sumo/Repaired.netecfg
index f9322ad..8978d25 100644
--- a/sumo/Repaired.netecfg
+++ b/sumo/Repaired.netecfg
@@ -1,6 +1,6 @@
-
@@ -9,7 +9,7 @@
-
+
diff --git a/sumo/Repaired.rou.xml b/sumo/Repaired.rou.xml
index 29df917..34d6f9c 100644
--- a/sumo/Repaired.rou.xml
+++ b/sumo/Repaired.rou.xml
@@ -1,9 +1,7 @@
-
-
-
diff --git a/sumo/Repaired.sumocfg b/sumo/Repaired.sumocfg
index 6f4785e..65c0438 100644
--- a/sumo/Repaired.sumocfg
+++ b/sumo/Repaired.sumocfg
@@ -1,12 +1,12 @@
-
-
+
diff --git a/tests/test_routing_utility.py b/tests/test_routing_utility.py
index 43b05a9..ecbdb96 100644
--- a/tests/test_routing_utility.py
+++ b/tests/test_routing_utility.py
@@ -3,6 +3,7 @@
import pytest
from agentevac.agents.routing_utility import (
+ _observation_based_exposure,
annotate_menu_with_expected_utility,
score_destination_utility,
score_route_utility,
@@ -172,3 +173,116 @@ def test_higher_risk_gets_lower_utility(self):
psychology=_psychology(confidence=0.1), profile=_profile()
)
assert menu[0]["expected_utility"] > menu[1]["expected_utility"]
+
+
+class TestObservationBasedExposure:
+ """Tests for the no_notice exposure function that uses only belief + route length."""
+
+ def test_zero_danger_gives_low_exposure(self):
+ item = {"len_edges": 5}
+ belief = {"p_safe": 0.9, "p_risky": 0.05, "p_danger": 0.05}
+ psych = {"perceived_risk": 0.05, "confidence": 0.8}
+ exposure = _observation_based_exposure(item, belief, psych)
+ assert exposure < 1.0
+
+ def test_high_danger_gives_high_exposure(self):
+ item = {"len_edges": 5}
+ belief = {"p_safe": 0.05, "p_risky": 0.05, "p_danger": 0.9}
+ psych = {"perceived_risk": 0.8, "confidence": 0.1}
+ exposure = _observation_based_exposure(item, belief, psych)
+ assert exposure > 1.0
+
+ def test_longer_route_gives_more_exposure(self):
+ belief = {"p_safe": 0.1, "p_risky": 0.3, "p_danger": 0.6}
+ psych = {"perceived_risk": 0.5, "confidence": 0.5}
+ short = _observation_based_exposure({"len_edges": 3}, belief, psych)
+ long = _observation_based_exposure({"len_edges": 15}, belief, psych)
+ assert long > short
+
+ def test_same_length_same_belief_gives_same_exposure(self):
+ belief = _neutral_belief()
+ psych = _psychology()
+ e1 = _observation_based_exposure({"len_edges": 5}, belief, psych)
+ e2 = _observation_based_exposure({"len_edges": 5}, belief, psych)
+ assert e1 == pytest.approx(e2)
+
+ def test_low_confidence_adds_uncertainty_penalty(self):
+ item = {"len_edges": 5}
+ belief = _neutral_belief()
+ confident = _observation_based_exposure(item, belief, _psychology(confidence=0.9))
+ uncertain = _observation_based_exposure(item, belief, _psychology(confidence=0.1))
+ assert uncertain > confident
+
+ def test_uses_len_edges_fastest_path_fallback(self):
+ belief = _neutral_belief()
+ psych = _psychology()
+ item_a = {"len_edges": 10}
+ item_b = {"len_edges_fastest_path": 10}
+ assert _observation_based_exposure(item_a, belief, psych) == pytest.approx(
+ _observation_based_exposure(item_b, belief, psych)
+ )
+
+
+class TestAnnotateMenuScenarioParam:
+ """Tests that the scenario parameter selects the correct exposure function."""
+
+ def _make_menu(self):
+ return [
+ {
+ "idx": 0, "name": "s0", "reachable": True,
+ "risk_sum": 3.0, "blocked_edges": 1, "min_margin_m": 50.0,
+ "travel_time_s_fastest_path": 300.0, "len_edges_fastest_path": 8,
+ },
+ ]
+
+ def test_no_notice_uses_observation_based_exposure(self):
+ menu = self._make_menu()
+ annotate_menu_with_expected_utility(
+ menu, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(), scenario="no_notice",
+ )
+ # Observation-based exposure ignores risk_sum and blocked_edges,
+ # so it should be much lower than route-specific exposure.
+ obs_exposure = menu[0]["utility_components"]["expected_exposure"]
+
+ menu2 = self._make_menu()
+ annotate_menu_with_expected_utility(
+ menu2, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(), scenario="advice_guided",
+ )
+ full_exposure = menu2[0]["utility_components"]["expected_exposure"]
+ # Route with blocked_edges=1 and risk_sum=3.0 should have much higher
+ # full exposure than belief-only exposure.
+ assert full_exposure > obs_exposure
+
+ def test_advice_guided_uses_route_specific_exposure(self):
+ menu = self._make_menu()
+ annotate_menu_with_expected_utility(
+ menu, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(), scenario="advice_guided",
+ )
+ # With blocked_edges=1, the exposure should include the 8.0 penalty.
+ assert menu[0]["utility_components"]["expected_exposure"] > 8.0
+
+ def test_alert_guided_uses_route_specific_exposure(self):
+ menu = self._make_menu()
+ annotate_menu_with_expected_utility(
+ menu, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(), scenario="alert_guided",
+ )
+ assert menu[0]["utility_components"]["expected_exposure"] > 8.0
+
+ def test_default_scenario_is_advice_guided(self):
+ menu_default = self._make_menu()
+ menu_explicit = self._make_menu()
+ annotate_menu_with_expected_utility(
+ menu_default, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(),
+ )
+ annotate_menu_with_expected_utility(
+ menu_explicit, mode="destination", belief=_neutral_belief(),
+ psychology=_psychology(), profile=_profile(), scenario="advice_guided",
+ )
+ assert menu_default[0]["expected_utility"] == pytest.approx(
+ menu_explicit[0]["expected_utility"]
+ )
diff --git a/tests/test_scenarios.py b/tests/test_scenarios.py
index d56123f..c7d63e4 100644
--- a/tests/test_scenarios.py
+++ b/tests/test_scenarios.py
@@ -16,14 +16,14 @@ def test_no_notice_mode(self):
cfg = load_scenario_config("no_notice")
assert cfg["mode"] == "no_notice"
assert cfg["forecast_visible"] is False
- assert cfg["expected_utility_visible"] is False
+ assert cfg["expected_utility_visible"] is True
def test_alert_guided_mode(self):
cfg = load_scenario_config("alert_guided")
assert cfg["mode"] == "alert_guided"
assert cfg["forecast_visible"] is True
assert cfg["route_head_forecast_visible"] is False
- assert cfg["expected_utility_visible"] is False
+ assert cfg["expected_utility_visible"] is True
def test_advice_guided_mode(self):
cfg = load_scenario_config("advice_guided")
@@ -139,11 +139,11 @@ def test_alert_guided_removes_advisory(self):
assert "briefing" not in result[0]
assert "reasons" not in result[0]
- def test_alert_guided_removes_expected_utility(self):
+ def test_alert_guided_retains_expected_utility(self):
menu = self._full_menu()
result = filter_menu_for_scenario("alert_guided", menu, control_mode="destination")
- assert "expected_utility" not in result[0]
- assert "utility_components" not in result[0]
+ assert "expected_utility" in result[0]
+ assert "utility_components" in result[0]
def test_alert_guided_retains_risk_fields(self):
menu = self._full_menu()
@@ -151,18 +151,31 @@ def test_alert_guided_retains_risk_fields(self):
assert "risk_sum" in result[0]
assert "blocked_edges" in result[0]
- def test_no_notice_destination_reduces_to_minimal_keys(self):
+ def test_no_notice_destination_keeps_local_knowledge_and_utility(self):
menu = self._full_menu()
+ menu[0]["travel_time_s_fastest_path"] = 300.0
+ menu[0]["len_edges_fastest_path"] = 8
result = filter_menu_for_scenario("no_notice", menu, control_mode="destination")
- allowed = {"idx", "name", "dest_edge", "reachable", "note"}
+ allowed = {
+ "idx", "name", "dest_edge", "reachable", "note",
+ "travel_time_s_fastest_path", "len_edges_fastest_path",
+ "expected_utility", "utility_components",
+ }
assert set(result[0].keys()).issubset(allowed)
assert "risk_sum" not in result[0]
-
- def test_no_notice_route_mode_reduces_to_minimal_keys(self):
- menu = [{"idx": 0, "name": "r0", "len_edges": 5, "risk_sum": 2.0}]
+ assert "expected_utility" in result[0]
+ assert "travel_time_s_fastest_path" in result[0]
+
+ def test_no_notice_route_mode_keeps_utility_and_length(self):
+ menu = [{
+ "idx": 0, "name": "r0", "len_edges": 5, "risk_sum": 2.0,
+ "expected_utility": -0.3, "utility_components": {"expected_exposure": 0.1},
+ }]
result = filter_menu_for_scenario("no_notice", menu, control_mode="route")
assert "risk_sum" not in result[0]
assert "len_edges" in result[0]
+ assert "expected_utility" in result[0]
+ assert "utility_components" in result[0]
def test_original_menu_list_not_mutated(self):
menu = self._full_menu()