diff --git a/claude_code_log/cache.py b/claude_code_log/cache.py
index 7213da9b..d0a3ea00 100644
--- a/claude_code_log/cache.py
+++ b/claude_code_log/cache.py
@@ -3,7 +3,7 @@
import json
from pathlib import Path
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, Optional, cast
from datetime import datetime
from pydantic import BaseModel
from packaging import version
@@ -18,7 +18,7 @@ class CachedFileInfo(BaseModel):
source_mtime: float
cached_mtime: float
message_count: int
- session_ids: List[str]
+ session_ids: list[str]
class SessionCacheData(BaseModel):
@@ -46,7 +46,7 @@ class ProjectCache(BaseModel):
project_path: str
# File-level cache information
- cached_files: Dict[str, CachedFileInfo]
+ cached_files: dict[str, CachedFileInfo]
# Aggregated project information
total_message_count: int = 0
@@ -56,10 +56,10 @@ class ProjectCache(BaseModel):
total_cache_read_tokens: int = 0
# Session metadata
- sessions: Dict[str, SessionCacheData]
+ sessions: dict[str, SessionCacheData]
# Working directories associated with this project
- working_directories: List[str] = []
+ working_directories: list[str] = []
# Timeline information
earliest_timestamp: str = ""
@@ -154,7 +154,7 @@ def is_file_cached(self, jsonl_path: Path) -> bool:
abs(source_mtime - cached_info.source_mtime) < 1.0 and cache_file.exists()
)
- def load_cached_entries(self, jsonl_path: Path) -> Optional[List[TranscriptEntry]]:
+ def load_cached_entries(self, jsonl_path: Path) -> Optional[list[TranscriptEntry]]:
"""Load cached transcript entries for a JSONL file."""
if not self.is_file_cached(jsonl_path):
return None
@@ -165,11 +165,11 @@ def load_cached_entries(self, jsonl_path: Path) -> Optional[List[TranscriptEntry
cache_data = json.load(f)
# Expect timestamp-keyed format - flatten all entries
- entries_data: List[Dict[str, Any]] = []
+ entries_data: list[dict[str, Any]] = []
for timestamp_entries in cache_data.values():
if isinstance(timestamp_entries, list):
- # Type cast to ensure Pyright knows this is List[Dict[str, Any]]
- entries_data.extend(cast(List[Dict[str, Any]], timestamp_entries))
+ # Type cast to ensure Pyright knows this is list[dict[str, Any]]
+ entries_data.extend(cast(list[dict[str, Any]], timestamp_entries))
# Deserialize back to TranscriptEntry objects
from .parser import parse_transcript_entry
@@ -184,7 +184,7 @@ def load_cached_entries(self, jsonl_path: Path) -> Optional[List[TranscriptEntry
def load_cached_entries_filtered(
self, jsonl_path: Path, from_date: Optional[str], to_date: Optional[str]
- ) -> Optional[List[TranscriptEntry]]:
+ ) -> Optional[list[TranscriptEntry]]:
"""Load cached entries with efficient timestamp-based filtering."""
if not self.is_file_cached(jsonl_path):
return None
@@ -226,15 +226,15 @@ def load_cached_entries_filtered(
)
# Filter entries by timestamp
- filtered_entries_data: List[Dict[str, Any]] = []
+ filtered_entries_data: list[dict[str, Any]] = []
for timestamp_key, timestamp_entries in cache_data.items():
if timestamp_key == "_no_timestamp":
# Always include entries without timestamps (like summaries)
if isinstance(timestamp_entries, list):
- # Type cast to ensure Pyright knows this is List[Dict[str, Any]]
+ # Type cast to ensure Pyright knows this is list[dict[str, Any]]
filtered_entries_data.extend(
- cast(List[Dict[str, Any]], timestamp_entries)
+ cast(list[dict[str, Any]], timestamp_entries)
)
else:
# Check if timestamp falls within range
@@ -251,9 +251,9 @@ def load_cached_entries_filtered(
continue
if isinstance(timestamp_entries, list):
- # Type cast to ensure Pyright knows this is List[Dict[str, Any]]
+ # Type cast to ensure Pyright knows this is list[dict[str, Any]]
filtered_entries_data.extend(
- cast(List[Dict[str, Any]], timestamp_entries)
+ cast(list[dict[str, Any]], timestamp_entries)
)
# Deserialize filtered entries
@@ -271,14 +271,14 @@ def load_cached_entries_filtered(
return None
def save_cached_entries(
- self, jsonl_path: Path, entries: List[TranscriptEntry]
+ self, jsonl_path: Path, entries: list[TranscriptEntry]
) -> None:
"""Save parsed transcript entries to cache with timestamp-based structure."""
cache_file = self._get_cache_file_path(jsonl_path)
try:
# Create timestamp-keyed cache structure for efficient date filtering
- cache_data: Dict[str, Any] = {}
+ cache_data: dict[str, Any] = {}
for entry in entries:
# Get timestamp - use empty string as fallback for entries without timestamps
@@ -306,7 +306,7 @@ def save_cached_entries(
cached_mtime = cache_file.stat().st_mtime
# Extract session IDs from entries
- session_ids: List[str] = []
+ session_ids: list[str] = []
for entry in entries:
if hasattr(entry, "sessionId"):
session_id = getattr(entry, "sessionId", "")
@@ -326,7 +326,7 @@ def save_cached_entries(
except Exception as e:
print(f"Warning: Failed to save cached entries to {cache_file}: {e}")
- def update_session_cache(self, session_data: Dict[str, SessionCacheData]) -> None:
+ def update_session_cache(self, session_data: dict[str, SessionCacheData]) -> None:
"""Update cached session information."""
if self._project_cache is None:
return
@@ -360,7 +360,7 @@ def update_project_aggregates(
self._save_project_cache()
- def update_working_directories(self, working_directories: List[str]) -> None:
+ def update_working_directories(self, working_directories: list[str]) -> None:
"""Update the list of working directories associated with this project."""
if self._project_cache is None:
return
@@ -368,9 +368,9 @@ def update_working_directories(self, working_directories: List[str]) -> None:
self._project_cache.working_directories = working_directories
self._save_project_cache()
- def get_modified_files(self, jsonl_files: List[Path]) -> List[Path]:
+ def get_modified_files(self, jsonl_files: list[Path]) -> list[Path]:
"""Get list of JSONL files that need to be reprocessed."""
- modified_files: List[Path] = []
+ modified_files: list[Path] = []
for jsonl_file in jsonl_files:
if not self.is_file_cached(jsonl_file):
@@ -450,7 +450,7 @@ def _is_cache_version_compatible(self, cache_version: str) -> bool:
# If no breaking changes affect this cache version, it's compatible
return True
- def get_cache_stats(self) -> Dict[str, Any]:
+ def get_cache_stats(self) -> dict[str, Any]:
"""Get cache statistics for reporting."""
if self._project_cache is None:
return {"cache_enabled": False}
diff --git a/claude_code_log/cli.py b/claude_code_log/cli.py
index 13ab85c9..285c1fc1 100644
--- a/claude_code_log/cli.py
+++ b/claude_code_log/cli.py
@@ -5,7 +5,7 @@
import os
import sys
from pathlib import Path
-from typing import Optional, List
+from typing import Optional
import click
from git import Repo, InvalidGitRepositoryError
@@ -108,7 +108,7 @@ def convert_project_path_to_claude_dir(
def find_projects_by_cwd(
projects_dir: Path, current_cwd: Optional[str] = None
-) -> List[Path]:
+) -> list[Path]:
"""Find Claude projects that match the current working directory.
Uses three-tier priority matching:
@@ -148,8 +148,8 @@ def find_projects_by_cwd(
def _find_exact_matches(
- project_dirs: List[Path], current_cwd_path: Path, base_projects_dir: Path
-) -> List[Path]:
+ project_dirs: list[Path], current_cwd_path: Path, base_projects_dir: Path
+) -> list[Path]:
"""Find projects with exact working directory matches using path-based matching."""
expected_project_dir = convert_project_path_to_claude_dir(
current_cwd_path, base_projects_dir
@@ -163,8 +163,8 @@ def _find_exact_matches(
def _find_git_root_matches(
- project_dirs: List[Path], current_cwd_path: Path, base_projects_dir: Path
-) -> List[Path]:
+ project_dirs: list[Path], current_cwd_path: Path, base_projects_dir: Path
+) -> list[Path]:
"""Find projects that match the git repository root using path-based matching."""
try:
# Check if we're inside a git repository
@@ -182,10 +182,10 @@ def _find_git_root_matches(
def _find_relative_matches(
- project_dirs: List[Path], current_cwd_path: Path
-) -> List[Path]:
+ project_dirs: list[Path], current_cwd_path: Path
+) -> list[Path]:
"""Find projects using relative path matching (original behavior)."""
- relative_matches: List[Path] = []
+ relative_matches: list[Path] = []
for project_dir in project_dirs:
try:
diff --git a/claude_code_log/converter.py b/claude_code_log/converter.py
index 83e58ee6..960e8b4e 100644
--- a/claude_code_log/converter.py
+++ b/claude_code_log/converter.py
@@ -5,7 +5,7 @@
import re
from pathlib import Path
import traceback
-from typing import List, Optional, Dict, Any, TYPE_CHECKING
+from typing import Optional, Any, TYPE_CHECKING
import dateparser
@@ -39,18 +39,22 @@
def filter_messages_by_date(
- messages: List[TranscriptEntry], from_date: Optional[str], to_date: Optional[str]
-) -> List[TranscriptEntry]:
- """Filter messages based on date range."""
+ messages: list[TranscriptEntry], from_date: Optional[str], to_date: Optional[str]
+) -> list[TranscriptEntry]:
+ """Filter messages based on date range.
+
+ Date parsing is done in UTC to match transcript timestamps which are stored in UTC.
+ """
if not from_date and not to_date:
return messages
- # Parse the date strings using dateparser
+ # Parse dates in UTC to match transcript timestamps (which are stored in UTC)
+ dateparser_settings: Any = {"TIMEZONE": "UTC", "RETURN_AS_TIMEZONE_AWARE": False}
from_dt = None
to_dt = None
if from_date:
- from_dt = dateparser.parse(from_date)
+ from_dt = dateparser.parse(from_date, settings=dateparser_settings)
if not from_dt:
raise ValueError(f"Could not parse from-date: {from_date}")
# If parsing relative dates like "today", start from beginning of day
@@ -58,14 +62,14 @@ def filter_messages_by_date(
from_dt = from_dt.replace(hour=0, minute=0, second=0, microsecond=0)
if to_date:
- to_dt = dateparser.parse(to_date)
+ to_dt = dateparser.parse(to_date, settings=dateparser_settings)
if not to_dt:
raise ValueError(f"Could not parse to-date: {to_date}")
# If parsing relative dates like "today", end at end of day
if to_date in ["today", "yesterday"] or "days ago" in to_date:
to_dt = to_dt.replace(hour=23, minute=59, second=59, microsecond=999999)
- filtered_messages: List[TranscriptEntry] = []
+ filtered_messages: list[TranscriptEntry] = []
for message in messages:
# Handle SummaryTranscriptEntry which doesn't have timestamp
if isinstance(message, SummaryTranscriptEntry):
@@ -102,7 +106,7 @@ def load_transcript(
to_date: Optional[str] = None,
silent: bool = False,
_loaded_files: Optional[set[Path]] = None,
-) -> List[TranscriptEntry]:
+) -> list[TranscriptEntry]:
"""Load and parse JSONL transcript file, using cache if available.
Args:
@@ -133,7 +137,7 @@ def load_transcript(
return cached_entries
# Parse from source file
- messages: List[TranscriptEntry] = []
+ messages: list[TranscriptEntry] = []
agent_ids: set[str] = set() # Collect agentId references while parsing
with open(jsonl_path, "r", encoding="utf-8", errors="replace") as f:
@@ -210,17 +214,17 @@ def load_transcript(
else:
print(
f"Line {line_no} of {jsonl_path} | ValueError: {error_msg}"
- "\n{traceback.format_exc()}"
+ f"\n{traceback.format_exc()}"
)
except Exception as e:
print(
f"Line {line_no} of {jsonl_path} | Unexpected error: {str(e)}"
- "\n{traceback.format_exc()}"
+ f"\n{traceback.format_exc()}"
)
# Load agent files if any were referenced
# Build a map of agentId -> agent messages
- agent_messages_map: dict[str, List[TranscriptEntry]] = {}
+ agent_messages_map: dict[str, list[TranscriptEntry]] = {}
if agent_ids:
parent_dir = jsonl_path.parent
for agent_id in agent_ids:
@@ -242,11 +246,11 @@ def load_transcript(
)
agent_messages_map[agent_id] = agent_messages
- # Insert agent messages at their point of use
+ # Insert agent messages at their point of use (only once per agent)
if agent_messages_map:
- # Iterate through messages and insert agent messages after the message
+ # Iterate through messages and insert agent messages after the FIRST message
# that references them (via UserTranscriptEntry.agentId)
- result_messages: List[TranscriptEntry] = []
+ result_messages: list[TranscriptEntry] = []
for message in messages:
result_messages.append(message)
@@ -254,8 +258,8 @@ def load_transcript(
if isinstance(message, UserTranscriptEntry) and message.agentId:
agent_id = message.agentId
if agent_id in agent_messages_map:
- # Insert agent messages right after this message
- result_messages.extend(agent_messages_map[agent_id])
+ # Insert agent messages right after this message (pop to insert only once)
+ result_messages.extend(agent_messages_map.pop(agent_id))
messages = result_messages
@@ -272,12 +276,15 @@ def load_directory_transcripts(
from_date: Optional[str] = None,
to_date: Optional[str] = None,
silent: bool = False,
-) -> List[TranscriptEntry]:
+) -> list[TranscriptEntry]:
"""Load all JSONL transcript files from a directory and combine them."""
- all_messages: List[TranscriptEntry] = []
+ all_messages: list[TranscriptEntry] = []
- # Find all .jsonl files
- jsonl_files = list(directory_path.glob("*.jsonl"))
+ # Find all .jsonl files, excluding agent files (they are loaded via load_transcript
+ # when a session references them via agentId)
+ jsonl_files = [
+ f for f in directory_path.glob("*.jsonl") if not f.name.startswith("agent-")
+ ]
for jsonl_file in jsonl_files:
messages = load_transcript(
@@ -300,7 +307,7 @@ def get_timestamp(entry: TranscriptEntry) -> str:
# =============================================================================
-def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
+def deduplicate_messages(messages: list[TranscriptEntry]) -> list[TranscriptEntry]:
"""Remove duplicate messages based on (type, timestamp, sessionId, content_key).
Messages with the exact same timestamp are duplicates by definition -
@@ -320,7 +327,7 @@ def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntr
"""
# Track seen (message_type, timestamp, is_meta, session_id, content_key) tuples
seen: set[tuple[str, str, bool, str, str]] = set()
- deduplicated: List[TranscriptEntry] = []
+ deduplicated: list[TranscriptEntry] = []
for message in messages:
# Get basic message type
@@ -343,6 +350,7 @@ def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntr
# Get content key for differentiating concurrent messages
# - For assistant messages: use message.id (same for stutters, different for different msgs)
# - For user messages with tool results: use first tool_use_id
+ # - For summary messages: use leafUuid (summaries have no timestamp/uuid)
# - For other messages: use uuid as fallback
content_key = ""
if isinstance(message, AssistantTranscriptEntry):
@@ -350,11 +358,13 @@ def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntr
content_key = message.message.id
elif isinstance(message, UserTranscriptEntry):
# For user messages, check for tool results
- if isinstance(message.message.content, list):
- for item in message.message.content:
- if isinstance(item, ToolResultContent):
- content_key = item.tool_use_id
- break
+ for item in message.message.content:
+ if isinstance(item, ToolResultContent):
+ content_key = item.tool_use_id
+ break
+ elif isinstance(message, SummaryTranscriptEntry):
+ # Summaries have no timestamp or uuid - use leafUuid to keep them distinct
+ content_key = message.leafUuid
# Fallback to uuid if no content key found
if not content_key:
content_key = getattr(message, "uuid", "")
@@ -455,7 +465,7 @@ def convert_jsonl_to(
# Update title to include date range if specified
if from_date or to_date:
- date_range_parts: List[str] = []
+ date_range_parts: list[str] = []
if from_date:
date_range_parts.append(f"from {from_date}")
if to_date:
@@ -512,21 +522,28 @@ def ensure_fresh_cache(
return False
# Check if cache needs updating
- jsonl_files = list(project_dir.glob("*.jsonl"))
- if not jsonl_files:
+ # Exclude agent files from direct check - they are loaded via session references
+ # Note: If only an agent file changes (session unchanged), cache won't detect it.
+ # This is acceptable since agent files typically change alongside their sessions.
+ session_jsonl_files = [
+ f for f in project_dir.glob("*.jsonl") if not f.name.startswith("agent-")
+ ]
+ if not session_jsonl_files:
return False
# Get cached project data
cached_project_data = cache_manager.get_cached_project_data()
# Check various invalidation conditions
- modified_files = cache_manager.get_modified_files(jsonl_files)
+ modified_files = cache_manager.get_modified_files(session_jsonl_files)
needs_update = (
cached_project_data is None
or from_date is not None
or to_date is not None
- or bool(modified_files) # Files changed
- or (cached_project_data.total_message_count == 0 and jsonl_files) # Stale cache
+ or bool(modified_files) # Session files changed
+ or (
+ cached_project_data.total_message_count == 0 and session_jsonl_files
+ ) # Stale cache
)
if not needs_update:
@@ -545,15 +562,15 @@ def ensure_fresh_cache(
def _update_cache_with_session_data(
- cache_manager: CacheManager, messages: List[TranscriptEntry]
+ cache_manager: CacheManager, messages: list[TranscriptEntry]
) -> None:
"""Update cache with session and project aggregate data."""
from .parser import extract_text_content
# Collect session data (similar to _collect_project_sessions but for cache)
- session_summaries: Dict[str, str] = {}
- uuid_to_session: Dict[str, str] = {}
- uuid_to_session_backup: Dict[str, str] = {}
+ session_summaries: dict[str, str] = {}
+ uuid_to_session: dict[str, str] = {}
+ uuid_to_session_backup: dict[str, str] = {}
# Build mapping from message UUID to session ID
for message in messages:
@@ -579,7 +596,7 @@ def _update_cache_with_session_data(
session_summaries[uuid_to_session_backup[leaf_uuid]] = message.summary
# Group messages by session and calculate session data
- sessions_cache_data: Dict[str, SessionCacheData] = {}
+ sessions_cache_data: dict[str, SessionCacheData] = {}
# Track token usage and timestamps for project aggregates
total_input_tokens = 0
@@ -704,7 +721,7 @@ def _update_cache_with_session_data(
)
-def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str, Any]]:
+def _collect_project_sessions(messages: list[TranscriptEntry]) -> list[dict[str, Any]]:
"""Collect session data for project index navigation."""
from .parser import extract_text_content
@@ -713,9 +730,9 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str,
# Pre-process to find and attach session summaries
# This matches the logic from renderer.py generate_html() exactly
- session_summaries: Dict[str, str] = {}
- uuid_to_session: Dict[str, str] = {}
- uuid_to_session_backup: Dict[str, str] = {}
+ session_summaries: dict[str, str] = {}
+ uuid_to_session: dict[str, str] = {}
+ uuid_to_session_backup: dict[str, str] = {}
# Build mapping from message UUID to session ID across ALL messages
# This allows summaries from later sessions to be matched to earlier sessions
@@ -745,7 +762,7 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str,
session_summaries[uuid_to_session_backup[leaf_uuid]] = message.summary
# Group messages by session (excluding warmup-only sessions)
- sessions: Dict[str, Dict[str, Any]] = {}
+ sessions: dict[str, dict[str, Any]] = {}
for message in messages:
if hasattr(message, "sessionId") and not isinstance(
message, SummaryTranscriptEntry
@@ -782,13 +799,13 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str,
)
# Convert to list format with formatted timestamps
- session_list: List[Dict[str, Any]] = []
+ session_list: list[dict[str, Any]] = []
for session_data in sessions.values():
timestamp_range = format_timestamp_range(
session_data["first_timestamp"],
session_data["last_timestamp"],
)
- session_dict: Dict[str, Any] = {
+ session_dict: dict[str, Any] = {
"id": session_data["id"],
"summary": session_data["summary"],
"timestamp_range": timestamp_range,
@@ -810,7 +827,7 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str,
def _generate_individual_session_files(
format: str,
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
output_dir: Path,
from_date: Optional[str] = None,
to_date: Optional[str] = None,
@@ -830,7 +847,7 @@ def _generate_individual_session_files(
session_ids.add(session_id)
# Get session data from cache for better titles
- session_data: Dict[str, Any] = {}
+ session_data: dict[str, Any] = {}
working_directories = None
if cache_manager is not None:
project_cache = cache_manager.get_cached_project_data()
@@ -842,6 +859,9 @@ def _generate_individual_session_files(
project_title = get_project_display_name(output_dir.name, working_directories)
+ # Get renderer once outside the loop
+ renderer = get_renderer(format)
+
# Generate HTML file for each session
for session_id in session_ids:
# Create session-specific title using cache data if available
@@ -865,7 +885,7 @@ def _generate_individual_session_files(
# Add date range if specified
if from_date or to_date:
- date_range_parts: List[str] = []
+ date_range_parts: list[str] = []
if from_date:
date_range_parts.append(f"from {from_date}")
if to_date:
@@ -875,7 +895,6 @@ def _generate_individual_session_files(
# Check if session file needs regeneration
session_file_path = output_dir / f"session-{session_id}.{format}"
- renderer = get_renderer(format)
# Only regenerate if outdated, doesn't exist, or date filtering is active
should_regenerate_session = (
@@ -912,7 +931,7 @@ def process_projects_hierarchy(
raise FileNotFoundError(f"Projects path not found: {projects_path}")
# Find all project directories (those with JSONL files)
- project_dirs: List[Path] = []
+ project_dirs: list[Path] = []
for child in projects_path.iterdir():
if child.is_dir() and list(child.glob("*.jsonl")):
project_dirs.append(child)
@@ -926,7 +945,7 @@ def process_projects_hierarchy(
library_version = get_library_version()
# Process each project directory
- project_summaries: List[Dict[str, Any]] = []
+ project_summaries: list[dict[str, Any]] = []
any_cache_updated = False # Track if any project had cache updates
for project_dir in sorted(project_dirs):
try:
@@ -956,7 +975,12 @@ def process_projects_hierarchy(
)
# Get project info for index - use cached data if available
- jsonl_files = list(project_dir.glob("*.jsonl"))
+ # Exclude agent files (they are loaded via session references)
+ jsonl_files = [
+ f
+ for f in project_dir.glob("*.jsonl")
+ if not f.name.startswith("agent-")
+ ]
jsonl_count = len(jsonl_files)
last_modified: float = (
max(f.stat().st_mtime for f in jsonl_files) if jsonl_files else 0.0
diff --git a/claude_code_log/html/ansi_colors.py b/claude_code_log/html/ansi_colors.py
index cd14f989..ab6085a7 100644
--- a/claude_code_log/html/ansi_colors.py
+++ b/claude_code_log/html/ansi_colors.py
@@ -7,7 +7,7 @@
import html
import re
-from typing import Any, Dict, List
+from typing import Any
def _escape_html(text: str) -> str:
@@ -54,8 +54,8 @@ def convert_ansi_to_html(text: str) -> str:
# This catches any we might have missed, but preserves \x1b[...m color codes
text = re.sub(r"\x1b\[(?![0-9;]*m)[0-9;]*[A-Za-z]", "", text)
- result: List[str] = []
- segments: List[Dict[str, Any]] = []
+ result: list[str] = []
+ segments: list[dict[str, Any]] = []
# First pass: split text into segments with their styles
last_end = 0
@@ -68,7 +68,7 @@ def convert_ansi_to_html(text: str) -> str:
current_rgb_fg = None
current_rgb_bg = None
- for match in re.finditer(r"\x1b\[([0-9;]+)m", text):
+ for match in re.finditer(r"\x1b\[([0-9;]*)m", text):
# Add text before this escape code
if match.start() > last_end:
segments.append(
@@ -85,8 +85,11 @@ def convert_ansi_to_html(text: str) -> str:
}
)
- # Process escape codes
- codes = match.group(1).split(";")
+ # Process escape codes (empty params = reset, same as code 0)
+ code_blob = match.group(1)
+ codes = code_blob.split(";") if code_blob else ["0"]
+ if codes == [""]:
+ codes = ["0"]
i = 0
while i < len(codes):
code = codes[i]
@@ -189,16 +192,20 @@ def convert_ansi_to_html(text: str) -> str:
elif code == "38" and i + 1 < len(codes) and codes[i + 1] == "2":
if i + 4 < len(codes):
r, g, b = codes[i + 2], codes[i + 3], codes[i + 4]
- current_rgb_fg = f"color: rgb({r}, {g}, {b})"
- current_fg = None
+ # Validate RGB values are numeric to avoid invalid CSS
+ if r.isdigit() and g.isdigit() and b.isdigit():
+ current_rgb_fg = f"color: rgb({r}, {g}, {b})"
+ current_fg = None
i += 4
# RGB background color
elif code == "48" and i + 1 < len(codes) and codes[i + 1] == "2":
if i + 4 < len(codes):
r, g, b = codes[i + 2], codes[i + 3], codes[i + 4]
- current_rgb_bg = f"background-color: rgb({r}, {g}, {b})"
- current_bg = None
+ # Validate RGB values are numeric to avoid invalid CSS
+ if r.isdigit() and g.isdigit() and b.isdigit():
+ current_rgb_bg = f"background-color: rgb({r}, {g}, {b})"
+ current_bg = None
i += 4
i += 1
@@ -226,8 +233,8 @@ def convert_ansi_to_html(text: str) -> str:
if not segment["text"]:
continue
- classes: List[str] = []
- styles: List[str] = []
+ classes: list[str] = []
+ styles: list[str] = []
if segment["fg"]:
classes.append(segment["fg"])
@@ -249,7 +256,7 @@ def convert_ansi_to_html(text: str) -> str:
escaped_text = _escape_html(segment["text"])
if classes or styles:
- attrs: List[str] = []
+ attrs: list[str] = []
if classes:
attrs.append(f'class="{" ".join(classes)}"')
if styles:
diff --git a/claude_code_log/html/renderer.py b/claude_code_log/html/renderer.py
index c2efdc8f..7c0e9dea 100644
--- a/claude_code_log/html/renderer.py
+++ b/claude_code_log/html/renderer.py
@@ -1,7 +1,7 @@
"""HTML renderer implementation for Claude Code transcripts."""
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Optional, Tuple
from ..cache import get_library_version
from ..models import (
@@ -168,8 +168,8 @@ def _format_message_content(self, message: TemplateMessage) -> str:
return ""
def _flatten_preorder(
- self, roots: List[TemplateMessage]
- ) -> List[Tuple[TemplateMessage, str]]:
+ self, roots: list[TemplateMessage]
+ ) -> list[Tuple[TemplateMessage, str]]:
"""Flatten message tree via pre-order traversal, formatting each message.
Traverses the tree depth-first (pre-order), formats each message's
@@ -181,7 +181,7 @@ def _flatten_preorder(
Returns:
Flat list of (message, html_content) tuples in pre-order
"""
- flat: List[Tuple[TemplateMessage, str]] = []
+ flat: list[Tuple[TemplateMessage, str]] = []
def visit(msg: TemplateMessage) -> None:
html = self._format_message_content(msg)
@@ -196,7 +196,7 @@ def visit(msg: TemplateMessage) -> None:
def generate(
self,
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
title: Optional[str] = None,
combined_transcript_link: Optional[str] = None,
) -> str:
@@ -239,7 +239,7 @@ def generate(
def generate_session(
self,
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
session_id: str,
title: Optional[str] = None,
cache_manager: Optional["CacheManager"] = None,
@@ -266,7 +266,7 @@ def generate_session(
def generate_projects_index(
self,
- project_summaries: List[Dict[str, Any]],
+ project_summaries: list[dict[str, Any]],
from_date: Optional[str] = None,
to_date: Optional[str] = None,
) -> str:
@@ -303,7 +303,7 @@ def is_outdated(self, file_path: Path) -> bool:
def generate_html(
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
title: Optional[str] = None,
combined_transcript_link: Optional[str] = None,
) -> str:
@@ -315,7 +315,7 @@ def generate_html(
def generate_session_html(
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
session_id: str,
title: Optional[str] = None,
cache_manager: Optional["CacheManager"] = None,
@@ -325,7 +325,7 @@ def generate_session_html(
def generate_projects_index_html(
- project_summaries: List[Dict[str, Any]],
+ project_summaries: list[dict[str, Any]],
from_date: Optional[str] = None,
to_date: Optional[str] = None,
) -> str:
diff --git a/claude_code_log/html/renderer_code.py b/claude_code_log/html/renderer_code.py
index ab93f039..7a633d45 100644
--- a/claude_code_log/html/renderer_code.py
+++ b/claude_code_log/html/renderer_code.py
@@ -10,7 +10,7 @@
import html
import os
import re
-from typing import Callable, List, Optional
+from typing import Callable, Optional
from pygments import highlight # type: ignore[reportUnknownVariableType]
from pygments.lexers import TextLexer, get_lexer_by_name, get_all_lexers # type: ignore[reportUnknownVariableType]
@@ -193,7 +193,7 @@ def render_line_diff(
sm = difflib.SequenceMatcher(None, old_line.rstrip("\n"), new_line.rstrip("\n"))
# Build old line with highlighting
- old_parts: List[str] = []
+ old_parts: list[str] = []
old_parts.append(
"
-"
)
@@ -208,7 +208,7 @@ def render_line_diff(
old_parts.append("
")
# Build new line with highlighting
- new_parts: List[str] = []
+ new_parts: list[str] = []
new_parts.append(
"+"
)
@@ -245,7 +245,7 @@ def render_single_diff(
# Generate unified diff to identify changed lines
differ = difflib.Differ()
- diff: List[str] = list(differ.compare(old_lines, new_lines))
+ diff: list[str] = list(differ.compare(old_lines, new_lines))
html_parts = ["
"]
@@ -257,7 +257,7 @@ def render_single_diff(
if prefix == "- ":
# Removed line - look ahead for corresponding addition
- removed_lines: List[str] = [content]
+ removed_lines: list[str] = [content]
j = i + 1
# Collect consecutive removed lines
@@ -270,7 +270,7 @@ def render_single_diff(
j += 1
# Collect consecutive added lines
- added_lines: List[str] = []
+ added_lines: list[str] = []
while j < len(diff) and diff[j].startswith("+ "):
added_lines.append(diff[j][2:])
j += 1
diff --git a/claude_code_log/html/templates/components/timeline.html b/claude_code_log/html/templates/components/timeline.html
index 7931efeb..9e09fbbb 100644
--- a/claude_code_log/html/templates/components/timeline.html
+++ b/claude_code_log/html/templates/components/timeline.html
@@ -29,7 +29,11 @@
'thinking': { id: 'thinking', content: '💭 Thinking', style: 'background-color: #fce4ec;' },
'system': { id: 'system', content: '⚙️ System', style: 'background-color: #ffeee1;' },
'image': { id: 'image', content: '🖼️ Image', style: 'background-color: #e1f5fe;' },
- 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' }
+ 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' },
+ 'slash-command': { id: 'slash-command', content: '⌨️ Slash Command', style: 'background-color: #e8eaf6;' },
+ 'command-output': { id: 'command-output', content: '📋 Command Output', style: 'background-color: #efebe9;' },
+ 'bash-input': { id: 'bash-input', content: '💻 Bash Input', style: 'background-color: #e8eaf6;' },
+ 'bash-output': { id: 'bash-output', content: '📄 Bash Output', style: 'background-color: #efebe9;' }
};
// Build timeline data from messages
@@ -55,6 +59,14 @@
messageType = 'sidechain';
} else if (classList.includes('system-warning') || classList.includes('system-error') || classList.includes('system-info')) {
messageType = 'system';
+ } else if (classList.includes('slash-command')) {
+ messageType = 'slash-command';
+ } else if (classList.includes('command-output')) {
+ messageType = 'command-output';
+ } else if (classList.includes('bash-input')) {
+ messageType = 'bash-input';
+ } else if (classList.includes('bash-output')) {
+ messageType = 'bash-output';
} else {
// Look for standard message types
messageType = classList.find(cls =>
@@ -179,10 +191,15 @@
const activeTypes = Array.from(document.querySelectorAll('.filter-toggle.active'))
.map(toggle => toggle.dataset.type);
+ // Get all filter toggle types (to know which groups have filter controls)
+ const allFilterTypes = Array.from(document.querySelectorAll('.filter-toggle'))
+ .map(toggle => toggle.dataset.type);
+
// Update groups visibility based on filter states
+ // Groups with filter toggles follow toggle state; groups without toggles stay visible
const updatedGroups = groups.map(group => ({
...group,
- visible: activeTypes.includes(group.id)
+ visible: allFilterTypes.includes(group.id) ? activeTypes.includes(group.id) : true
}));
// Update timeline groups
@@ -260,7 +277,7 @@
axis: 2
},
groupOrder: (a, b) => {
- const order = ['user', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'thinking', 'system', 'image'];
+ const order = ['user', 'system', 'slash-command', 'command-output', 'bash-input', 'bash-output', 'thinking', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'image'];
return order.indexOf(a.id) - order.indexOf(b.id);
}
};
diff --git a/claude_code_log/html/templates/transcript.html b/claude_code_log/html/templates/transcript.html
index 8a256853..2f812bdf 100644
--- a/claude_code_log/html/templates/transcript.html
+++ b/claude_code_log/html/templates/transcript.html
@@ -272,7 +272,7 @@
🔍 Search & Filter
// Count messages by type and update button labels
function updateMessageCounts() {
- const messageTypes = ['user', 'assistant', 'sidechain', 'system', 'thinking', 'image'];
+ const messageTypes = ['assistant', 'sidechain', 'system', 'thinking', 'image'];
messageTypes.forEach(type => {
const messages = document.querySelectorAll(`.message.${type}:not(.session-header)`);
@@ -292,8 +292,23 @@
🔍 Search & Filter
}
});
- // Handle combined "tool" filter (tool_use + tool_result + bash messages)
- const toolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ // Handle combined "user" filter (user + bash-input + bash-output)
+ const userMessages = document.querySelectorAll(`.message.user:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ const userCount = userMessages.length;
+ const userToggle = document.querySelector(`[data-type="user"]`);
+ const userCountSpan = userToggle ? userToggle.querySelector('.count') : null;
+
+ if (userCountSpan) {
+ userCountSpan.textContent = `(${userCount})`;
+ if (userCount === 0) {
+ userToggle.style.display = 'none';
+ } else {
+ userToggle.style.display = 'flex';
+ }
+ }
+
+ // Handle combined "tool" filter (tool_use + tool_result)
+ const toolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header)`);
const toolCount = toolMessages.length;
const toolToggle = document.querySelector(`[data-type="tool"]`);
const toolCountSpan = toolToggle ? toolToggle.querySelector('.count') : null;
@@ -314,11 +329,14 @@
🔍 Search & Filter
.filter(toggle => toggle.classList.contains('active'))
.map(toggle => toggle.dataset.type);
- // Expand "tool" to include tool_use, tool_result, and bash messages
+ // Expand filter types to their corresponding CSS classes
const expandedTypes = [];
activeTypes.forEach(type => {
if (type === 'tool') {
- expandedTypes.push('tool_use', 'tool_result', 'bash-input', 'bash-output');
+ expandedTypes.push('tool_use', 'tool_result');
+ } else if (type === 'user') {
+ // User filter includes bash commands (user-initiated)
+ expandedTypes.push('user', 'bash-input', 'bash-output');
} else {
expandedTypes.push(type);
}
@@ -362,7 +380,7 @@
🔍 Search & Filter
}
function updateVisibleCounts() {
- const messageTypes = ['user', 'assistant', 'sidechain', 'system', 'thinking', 'image'];
+ const messageTypes = ['assistant', 'sidechain', 'system', 'thinking', 'image'];
messageTypes.forEach(type => {
const visibleMessages = document.querySelectorAll(`.message.${type}:not(.session-header):not(.filtered-hidden)`);
@@ -389,9 +407,32 @@
🔍 Search & Filter
}
});
- // Handle combined "tool" filter separately (includes bash messages)
- const visibleToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header):not(.filtered-hidden), .message.tool_result:not(.session-header):not(.filtered-hidden), .message.bash-input:not(.session-header):not(.filtered-hidden), .message.bash-output:not(.session-header):not(.filtered-hidden)`);
- const totalToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ // Handle combined "user" filter separately (includes bash messages)
+ const visibleUserMessages = document.querySelectorAll(`.message.user:not(.session-header):not(.filtered-hidden), .message.bash-input:not(.session-header):not(.filtered-hidden), .message.bash-output:not(.session-header):not(.filtered-hidden)`);
+ const totalUserMessages = document.querySelectorAll(`.message.user:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ const visibleUserCount = visibleUserMessages.length;
+ const totalUserCount = totalUserMessages.length;
+
+ const userToggle = document.querySelector(`[data-type="user"]`);
+ const userCountSpan = userToggle ? userToggle.querySelector('.count') : null;
+
+ if (userCountSpan && totalUserCount > 0) {
+ const activeTypes = Array.from(filterToggles)
+ .filter(toggle => toggle.classList.contains('active'))
+ .map(toggle => toggle.dataset.type);
+
+ const isFiltering = activeTypes.length < filterToggles.length;
+
+ if (isFiltering && visibleUserCount !== totalUserCount) {
+ userCountSpan.textContent = `(${visibleUserCount}/${totalUserCount})`;
+ } else {
+ userCountSpan.textContent = `(${totalUserCount})`;
+ }
+ }
+
+ // Handle combined "tool" filter separately
+ const visibleToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header):not(.filtered-hidden), .message.tool_result:not(.session-header):not(.filtered-hidden)`);
+ const totalToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header)`);
const visibleToolCount = visibleToolMessages.length;
const totalToolCount = totalToolMessages.length;
diff --git a/claude_code_log/html/tool_formatters.py b/claude_code_log/html/tool_formatters.py
index 35053562..962a9f2f 100644
--- a/claude_code_log/html/tool_formatters.py
+++ b/claude_code_log/html/tool_formatters.py
@@ -14,9 +14,11 @@
HTML for display in transcripts.
"""
+import base64
+import binascii
import json
import re
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, Optional, cast
from .utils import (
escape_html,
@@ -48,7 +50,7 @@
def _render_question_item(q: AskUserQuestionItem) -> str:
"""Render a single question item to HTML."""
- html_parts: List[str] = ['
']
+ html_parts: list[str] = ['
']
# Header (if present)
if q.header:
@@ -89,7 +91,7 @@ def format_askuserquestion_content(ask_input: AskUserQuestionInput) -> str:
options (with label and description), and multiSelect flag.
"""
# Build list of questions from both formats
- questions: List[AskUserQuestionItem] = list(ask_input.questions)
+ questions: list[AskUserQuestionItem] = list(ask_input.questions)
# Handle single question format (legacy)
if not questions and ask_input.question:
@@ -99,7 +101,7 @@ def format_askuserquestion_content(ask_input: AskUserQuestionInput) -> str:
return '
No question
'
# Build HTML for all questions
- html_parts: List[str] = ['
']
+ html_parts: list[str] = ['
']
for q in questions:
html_parts.append(_render_question_item(q))
html_parts.append("
") # Close askuserquestion-content
@@ -140,7 +142,7 @@ def format_askuserquestion_result(content: str) -> str:
return ""
# Build styled HTML
- html_parts: List[str] = [
+ html_parts: list[str] = [
'
'
]
@@ -217,7 +219,7 @@ def format_todowrite_content(todo_input: TodoWriteInput) -> str:
status_emojis = {"pending": "⏳", "in_progress": "🔄", "completed": "✅"}
# Build todo list HTML - todos are typed TodoWriteItem objects
- todo_items: List[str] = []
+ todo_items: list[str] = []
for todo in todo_input.todos:
todo_id = escape_html(todo.id) if todo.id else ""
content = escape_html(todo.content) if todo.content else ""
@@ -266,7 +268,7 @@ def format_read_tool_content(read_input: ReadInput) -> str: # noqa: ARG001
def _parse_cat_n_snippet(
- lines: List[str], start_idx: int = 0
+ lines: list[str], start_idx: int = 0
) -> Optional[tuple[str, Optional[str], int]]:
"""Parse cat-n formatted snippet from lines.
@@ -277,7 +279,7 @@ def _parse_cat_n_snippet(
Returns:
Tuple of (code_content, system_reminder, line_offset) or None if not parseable
"""
- code_lines: List[str] = []
+ code_lines: list[str] = []
system_reminder: Optional[str] = None
in_system_reminder = False
line_offset = 1 # Default offset
@@ -631,7 +633,7 @@ def format_tool_use_title(tool_use: ToolUseContent) -> str:
# -- Generic Parameter Table --------------------------------------------------
-def render_params_table(params: Dict[str, Any]) -> str:
+def render_params_table(params: dict[str, Any]) -> str:
"""Render a dictionary of parameters as an HTML table.
Reusable for tool parameters, diagnostic objects, etc.
@@ -788,11 +790,11 @@ def format_tool_result_content(
if isinstance(tool_result.content, str):
raw_content = tool_result.content
has_images = False
- image_html_parts: List[str] = []
+ image_html_parts: list[str] = []
else:
# Content is a list of structured items, extract text and images
- content_parts: List[str] = []
- image_html_parts: List[str] = []
+ content_parts: list[str] = []
+ image_html_parts: list[str] = []
for item in tool_result.content:
item_type = item.get("type")
if item_type == "text":
@@ -801,14 +803,28 @@ def format_tool_result_content(
content_parts.append(text_value)
elif item_type == "image":
# Handle image content within tool results
- source = cast(Dict[str, Any], item.get("source", {}))
+ source = cast(dict[str, Any], item.get("source", {}))
if source:
media_type: str = str(source.get("media_type", "image/png"))
+ # Restrict to safe image types to prevent XSS via SVG
+ allowed_media_types = {
+ "image/png",
+ "image/jpeg",
+ "image/gif",
+ "image/webp",
+ }
+ if media_type not in allowed_media_types:
+ continue
data: str = str(source.get("data", ""))
if data:
+ # Validate base64 data to prevent corruption/injection
+ try:
+ base64.b64decode(data, validate=True)
+ except (binascii.Error, ValueError):
+ continue
data_url = f"data:{media_type};base64,{data}"
image_html_parts.append(
- f'

'
)
raw_content = "\n".join(content_parts)
@@ -868,15 +884,21 @@ def format_tool_result_content(
# Check if this looks like Bash tool output and process ANSI codes
# Bash tool results often contain ANSI escape sequences and terminal output
- if _looks_like_bash_output(raw_content):
- escaped_content = convert_ansi_to_html(raw_content)
- else:
- escaped_content = escape_html(raw_content)
+ is_ansi = _looks_like_bash_output(raw_content)
+ full_html = (
+ convert_ansi_to_html(raw_content) if is_ansi else escape_html(raw_content)
+ )
+ # For preview, always use plain escaped text (don't truncate HTML with tags)
+ preview_html = (
+ escape_html(raw_content[:200]) + "..."
+ if len(raw_content) > 200
+ else escape_html(raw_content)
+ )
# Build final HTML based on content length and presence of images
if has_images:
# Combine text and images
- text_html = f"
{escaped_content}" if escaped_content else ""
+ text_html = f"
{full_html}" if full_html else ""
images_html = "".join(image_html_parts)
combined_content = f"{text_html}{images_html}"
@@ -895,18 +917,17 @@ def format_tool_result_content(
else:
# Text-only content (existing behavior)
# For simple content, show directly without collapsible wrapper
- if len(escaped_content) <= 200:
- return f"
{escaped_content}"
+ if len(raw_content) <= 200:
+ return f"
{full_html}"
# For longer content, use collapsible details but no extra wrapper
- preview_text = escaped_content[:200] + "..."
return f"""
-
+
-
{escaped_content}
+
{full_html}
"""
diff --git a/claude_code_log/html/user_formatters.py b/claude_code_log/html/user_formatters.py
index 21528208..b89b329d 100644
--- a/claude_code_log/html/user_formatters.py
+++ b/claude_code_log/html/user_formatters.py
@@ -8,10 +8,6 @@
- tool_formatters.py: tool use/result content
"""
-from typing import List
-
-import mistune
-
from .ansi_colors import convert_ansi_to_html
from ..models import (
BashInputContent,
@@ -52,7 +48,7 @@ def format_slash_command_content(content: SlashCommandContent) -> str:
escaped_command_contents = escape_html(formatted_contents)
# Build the content HTML - command name is the primary content
- content_parts: List[str] = [f"
{escaped_command_name}"]
+ content_parts: list[str] = [f"
{escaped_command_name}"]
if content.command_args:
content_parts.append(f"
Args: {escaped_command_args}")
if content.command_contents:
@@ -64,8 +60,8 @@ def format_slash_command_content(content: SlashCommandContent) -> str:
f"
Content:{escaped_command_contents}"
)
else:
- # Long content, make collapsible
- preview = "\n".join(lines[:5])
+ # Long content, make collapsible with truncation indicator
+ preview = "\n".join(lines[:5]) + "\n..."
collapsible = render_collapsible_code(
f"
{preview}",
f"
{escaped_command_contents}",
@@ -87,9 +83,10 @@ def format_command_output_content(content: CommandOutputContent) -> str:
HTML string for the command output display
"""
if content.is_markdown:
- # Render as markdown
- markdown_html = mistune.html(content.stdout)
- return f"
{markdown_html}
"
+ # Render as markdown using shared renderer for GFM plugins and syntax highlighting
+ return render_markdown_collapsible(
+ content.stdout, "command-output-content", line_threshold=20
+ )
else:
# Convert ANSI codes to HTML for colored display
html_content = convert_ansi_to_html(content.stdout)
@@ -128,18 +125,18 @@ def format_bash_output_content(
Returns:
HTML string for the bash output display
"""
- output_parts: List[tuple[str, str, int, str]] = []
+ output_parts: list[tuple[str, str, int, str]] = []
total_lines = 0
if content.stdout:
escaped_stdout = convert_ansi_to_html(content.stdout)
- stdout_lines = content.stdout.count("\n") + 1
+ stdout_lines = len(content.stdout.splitlines())
total_lines += stdout_lines
output_parts.append(("stdout", escaped_stdout, stdout_lines, content.stdout))
if content.stderr:
escaped_stderr = convert_ansi_to_html(content.stderr)
- stderr_lines = content.stderr.count("\n") + 1
+ stderr_lines = len(content.stderr.splitlines())
total_lines += stderr_lines
output_parts.append(("stderr", escaped_stderr, stderr_lines, content.stderr))
@@ -150,7 +147,7 @@ def format_bash_output_content(
)
# Build the HTML parts
- html_parts: List[str] = []
+ html_parts: list[str] = []
for output_type, escaped_content, _, _ in output_parts:
css_name = f"bash-{output_type}"
html_parts.append(f"
{escaped_content}")
@@ -166,13 +163,12 @@ def format_bash_output_content(
if total_lines > preview_lines:
preview_html += "\n..."
- return f"""
-
- {total_lines} lines
- {preview_html}
-
- {full_html}
- """
+ # Use render_collapsible_code for consistent collapse markup
+ return render_collapsible_code(
+ preview_html=f"
{preview_html}",
+ full_html=full_html,
+ line_count=total_lines,
+ )
return full_html
@@ -205,7 +201,7 @@ def format_user_text_model_content(content: UserTextContent) -> str:
Returns:
HTML string combining IDE notifications and main text content
"""
- parts: List[str] = []
+ parts: list[str] = []
# Add IDE notifications first if present
if content.ide_notifications:
@@ -280,21 +276,21 @@ def _format_selection(selection: IdeSelection) -> str:
# For large selections, make them collapsible
if len(selection.content) > 200:
preview = escape_html(selection.content[:150]) + "..."
- return f"""
-
-
- 📝 {preview}
- {escaped_content}
-
-
- """
+ return (
+ f"
"
+ f"
"
+ f"📝 {preview}
"
+ f"{escaped_content}"
+ f" "
+ f"
"
+ )
else:
return f"
📝 {escaped_content}
"
-def _format_diagnostic(diagnostic: IdeDiagnostic) -> List[str]:
+def _format_diagnostic(diagnostic: IdeDiagnostic) -> list[str]:
"""Format a single IDE diagnostic as HTML (may produce multiple notifications)."""
- notifications: List[str] = []
+ notifications: list[str] = []
if diagnostic.diagnostics:
# Parsed JSON diagnostics - render each as a table
@@ -308,17 +304,19 @@ def _format_diagnostic(diagnostic: IdeDiagnostic) -> List[str]:
notifications.append(notification_html)
elif diagnostic.raw_content:
# JSON parsing failed, render as plain text
+ is_truncated = len(diagnostic.raw_content) > 200
escaped_content = escape_html(diagnostic.raw_content[:200])
+ truncation_marker = "..." if is_truncated else ""
notification_html = (
f"
🤖 IDE Diagnostics (parse error)
"
- f"
{escaped_content}..."
+ f"
{escaped_content}{truncation_marker}"
)
notifications.append(notification_html)
return notifications
-def format_ide_notification_content(content: IdeNotificationContent) -> List[str]:
+def format_ide_notification_content(content: IdeNotificationContent) -> list[str]:
"""Format IDE notification content as HTML.
Takes structured IdeNotificationContent and returns a list of HTML
@@ -330,7 +328,7 @@ def format_ide_notification_content(content: IdeNotificationContent) -> List[str
Returns:
List of HTML notification strings
"""
- notifications: List[str] = []
+ notifications: list[str] = []
# Format opened files
for opened_file in content.opened_files:
diff --git a/claude_code_log/html/utils.py b/claude_code_log/html/utils.py
index fdd8c379..8a99edbb 100644
--- a/claude_code_log/html/utils.py
+++ b/claude_code_log/html/utils.py
@@ -13,6 +13,7 @@
HTML-specific output.
"""
+import functools
import html
from pathlib import Path
from typing import Any, Optional, TYPE_CHECKING
@@ -83,8 +84,15 @@ def get_message_emoji(msg: "TemplateMessage") -> str:
if msg_type == "session_header":
return "📋"
elif msg_type == "user":
+ # Command output has no emoji (neutral - can be from built-in or user command)
+ if msg.modifiers.is_command_output:
+ return ""
+ return "🤷"
+ elif msg_type == "bash-input":
return "🤷"
elif msg_type == "assistant":
+ if msg.modifiers.is_sidechain:
+ return "🔗"
return "🤖"
elif msg_type == "system":
return "⚙️"
@@ -131,7 +139,7 @@ def block_code(code: str, info: Optional[str] = None) -> str:
# Language hint provided, use Pygments
lang = info.split()[0] if info else ""
try:
- lexer = get_lexer_by_name(lang, stripall=True) # type: ignore[reportUnknownVariableType]
+ lexer = get_lexer_by_name(lang, stripall=False) # type: ignore[reportUnknownVariableType]
except ClassNotFound:
lexer = TextLexer() # type: ignore[reportUnknownVariableType]
@@ -152,24 +160,29 @@ def block_code(code: str, info: Optional[str] = None) -> str:
return plugin_pygments
+@functools.lru_cache(maxsize=1)
+def _get_markdown_renderer() -> mistune.Markdown:
+ """Get cached Mistune markdown renderer with Pygments syntax highlighting."""
+ return mistune.create_markdown(
+ plugins=[
+ "strikethrough",
+ "footnotes",
+ "table",
+ "url",
+ "task_lists",
+ "def_list",
+ _create_pygments_plugin(),
+ ],
+ escape=False, # Don't escape HTML since we want to render markdown properly
+ hard_wrap=True, # Line break for newlines (checklists in Assistant messages)
+ )
+
+
def render_markdown(text: str) -> str:
"""Convert markdown text to HTML using mistune with Pygments syntax highlighting."""
# Track markdown rendering time if enabled
with timing_stat("_markdown_timings"):
- # Configure mistune with GitHub-flavored markdown features
- renderer = mistune.create_markdown(
- plugins=[
- "strikethrough",
- "footnotes",
- "table",
- "url",
- "task_lists",
- "def_list",
- _create_pygments_plugin(),
- ],
- escape=False, # Don't escape HTML since we want to render markdown properly
- hard_wrap=True, # Line break for newlines (checklists in Assistant messages)
- )
+ renderer = _get_markdown_renderer()
return str(renderer(text))
@@ -281,7 +294,7 @@ def render_file_content_collapsible(
html_parts = [f"
"]
- lines = code_content.split("\n")
+ lines = code_content.splitlines()
if len(lines) > line_threshold:
# Extract preview from already-highlighted HTML (avoids double highlighting)
preview_html = truncate_highlighted_preview(
@@ -331,8 +344,9 @@ def starts_with_emoji(text: str) -> bool:
)
+@functools.lru_cache(maxsize=1)
def get_template_environment() -> Environment:
- """Get Jinja2 template environment for HTML rendering.
+ """Get cached Jinja2 template environment for HTML rendering.
Creates a Jinja2 environment configured with:
- Template loading from the templates directory
@@ -340,7 +354,7 @@ def get_template_environment() -> Environment:
- Custom template filters/functions (starts_with_emoji)
Returns:
- Configured Jinja2 Environment
+ Configured Jinja2 Environment (cached after first call)
"""
templates_dir = Path(__file__).parent / "templates"
env = Environment(
diff --git a/claude_code_log/models.py b/claude_code_log/models.py
index 135983e1..eaf3315f 100644
--- a/claude_code_log/models.py
+++ b/claude_code_log/models.py
@@ -5,13 +5,13 @@
from dataclasses import dataclass
from enum import Enum
-from typing import Any, List, Union, Optional, Dict, Literal
+from typing import Any, Union, Optional, Literal
from anthropic.types import Message as AnthropicMessage
from anthropic.types import StopReason
from anthropic.types import Usage as AnthropicUsage
from anthropic.types.content_block import ContentBlock
-from pydantic import BaseModel
+from pydantic import BaseModel, PrivateAttr
class MessageType(str, Enum):
@@ -122,8 +122,8 @@ class HookSummaryContent(MessageContent):
"""
has_output: bool
- hook_errors: List[str] # Error messages from hooks
- hook_infos: List[HookInfo] # Info about each hook executed
+ hook_errors: list[str] # Error messages from hooks
+ hook_infos: list[HookInfo] # Info about each hook executed
# =============================================================================
@@ -187,7 +187,7 @@ class ToolResultContentModel(MessageContent):
"""
tool_use_id: str
- content: Any # Union[str, List[Dict[str, Any]]]
+ content: Any # Union[str, list[dict[str, Any]]]
is_error: bool = False
tool_name: Optional[str] = None # Name of the tool that produced this result
file_path: Optional[str] = None # File path for Read/Edit/Write tools
@@ -239,7 +239,7 @@ class IdeDiagnostic:
Contains either parsed JSON diagnostics or raw content if parsing failed.
"""
- diagnostics: Optional[List[Dict[str, Any]]] = None # Parsed diagnostic objects
+ diagnostics: Optional[list[dict[str, Any]]] = None # Parsed diagnostic objects
raw_content: Optional[str] = None # Fallback if JSON parsing failed
@@ -255,9 +255,9 @@ class IdeNotificationContent(MessageContent):
Format-neutral: stores structured data, not HTML.
"""
- opened_files: List[IdeOpenedFile]
- selections: List[IdeSelection]
- diagnostics: List[IdeDiagnostic]
+ opened_files: list[IdeOpenedFile]
+ selections: list[IdeSelection]
+ diagnostics: list[IdeDiagnostic]
remaining_text: str # Text after notifications extracted
@@ -379,7 +379,7 @@ class EditOutput(MessageContent):
file_path: str
success: bool
- diffs: List[EditDiff] # Changes made
+ diffs: list[EditDiff] # Changes made
message: str # Result message or code snippet
start_line: int = 1 # Starting line number for code display
@@ -424,7 +424,7 @@ class GlobOutput(MessageContent):
"""
pattern: str
- files: List[str] # Matching file paths
+ files: list[str] # Matching file paths
truncated: bool # Whether list was truncated
@@ -438,7 +438,7 @@ class GrepOutput(MessageContent):
"""
pattern: str
- matches: List[str] # Matching lines/files
+ matches: list[str] # Matching lines/files
output_mode: str # "content", "files_with_matches", or "count"
truncated: bool
@@ -526,7 +526,7 @@ class MultiEditInput(BaseModel):
"""Input parameters for the MultiEdit tool."""
file_path: str
- edits: List[EditItem]
+ edits: list[EditItem]
class GlobInput(BaseModel):
@@ -581,7 +581,7 @@ class TodoWriteItem(BaseModel):
class TodoWriteInput(BaseModel):
"""Input parameters for the TodoWrite tool."""
- todos: List[TodoWriteItem]
+ todos: list[TodoWriteItem]
class AskUserQuestionOption(BaseModel):
@@ -602,7 +602,7 @@ class AskUserQuestionItem(BaseModel):
question: str = ""
header: Optional[str] = None
- options: List[AskUserQuestionOption] = []
+ options: list[AskUserQuestionOption] = []
multiSelect: bool = False
@@ -612,7 +612,7 @@ class AskUserQuestionInput(BaseModel):
Supports both modern format (questions list) and legacy format (single question).
"""
- questions: List[AskUserQuestionItem] = []
+ questions: list[AskUserQuestionItem] = []
question: Optional[str] = None # Legacy single question format
@@ -637,7 +637,7 @@ class ExitPlanModeInput(BaseModel):
TodoWriteInput,
AskUserQuestionInput,
ExitPlanModeInput,
- Dict[str, Any], # Fallback for unknown tools
+ dict[str, Any], # Fallback for unknown tools
]
@@ -649,7 +649,7 @@ class UsageInfo(BaseModel):
cache_read_input_tokens: Optional[int] = None
output_tokens: Optional[int] = None
service_tier: Optional[str] = None
- server_tool_use: Optional[Dict[str, Any]] = None
+ server_tool_use: Optional[dict[str, Any]] = None
def to_anthropic_usage(self) -> Optional[AnthropicUsage]:
"""Convert to Anthropic Usage type if both required fields are present."""
@@ -688,8 +688,10 @@ class ToolUseContent(BaseModel, MessageContent):
type: Literal["tool_use"]
id: str
name: str
- input: Dict[str, Any]
- _parsed_input: Optional["ToolInput"] = None # Cached parsed input
+ input: dict[str, Any]
+ _parsed_input: Optional["ToolInput"] = PrivateAttr(
+ default=None
+ ) # Cached parsed input
@property
def parsed_input(self) -> "ToolInput":
@@ -711,7 +713,7 @@ def parsed_input(self) -> "ToolInput":
class ToolResultContent(BaseModel):
type: Literal["tool_result"]
tool_use_id: str
- content: Union[str, List[Dict[str, Any]]]
+ content: Union[str, list[dict[str, Any]]]
is_error: Optional[bool] = None
agentId: Optional[str] = None # Reference to agent file for sub-agent messages
@@ -746,7 +748,7 @@ class ImageContent(BaseModel, MessageContent):
class UserMessage(BaseModel):
role: Literal["user"]
- content: Union[str, List[ContentItem]]
+ content: list[ContentItem]
usage: Optional["UsageInfo"] = None # For type compatibility with AssistantMessage
@@ -757,7 +759,7 @@ class AssistantMessage(BaseModel):
type: Literal["message"]
role: Literal["assistant"]
model: str
- content: List[ContentItem]
+ content: list[ContentItem]
stop_reason: Optional[StopReason] = None
stop_sequence: Optional[str] = None
usage: Optional[UsageInfo] = None
@@ -789,8 +791,8 @@ def from_anthropic_message(
# ReadOutput, EditOutput, etc. (see Tool Output Content Models section)
ToolUseResult = Union[
str,
- List[Any], # Covers List[TodoWriteItem], List[ContentItem], etc.
- Dict[str, Any], # Covers structured results
+ list[Any], # Covers list[TodoWriteItem], list[ContentItem], etc.
+ dict[str, Any], # Covers structured results
]
@@ -837,8 +839,8 @@ class SystemTranscriptEntry(BaseTranscriptEntry):
level: Optional[str] = None # e.g., "warning", "info", "error"
# Hook summary fields (for subtype="stop_hook_summary")
hasOutput: Optional[bool] = None
- hookErrors: Optional[List[str]] = None
- hookInfos: Optional[List[Dict[str, Any]]] = None
+ hookErrors: Optional[list[str]] = None
+ hookInfos: Optional[list[dict[str, Any]]] = None
preventedContinuation: Optional[bool] = None
@@ -857,7 +859,7 @@ class QueueOperationTranscriptEntry(BaseModel):
operation: Literal["enqueue", "dequeue", "remove", "popAll"]
timestamp: str
sessionId: str
- content: Optional[Union[List[ContentItem], str]] = (
+ content: Optional[Union[list[ContentItem], str]] = (
None # List for enqueue, str for remove/popAll
)
diff --git a/claude_code_log/parser.py b/claude_code_log/parser.py
index 23266d3d..89ead392 100644
--- a/claude_code_log/parser.py
+++ b/claude_code_log/parser.py
@@ -3,7 +3,7 @@
import json
import re
-from typing import Any, Callable, Dict, List, Optional, Union, cast, TypeGuard
+from typing import Any, Callable, Optional, Union, cast, TypeGuard
from datetime import datetime
from anthropic.types import Message as AnthropicMessage
@@ -61,26 +61,23 @@
)
-def extract_text_content(content: Union[str, List[ContentItem], None]) -> str:
+def extract_text_content(content: Optional[list[ContentItem]]) -> str:
"""Extract text content from Claude message content structure.
Supports both custom models (TextContent, ThinkingContent) and official
Anthropic SDK types (TextBlock, ThinkingBlock).
"""
- if content is None:
+ if not content:
return ""
- if isinstance(content, list):
- text_parts: List[str] = []
- for item in content:
- # Handle text content (custom TextContent or Anthropic TextBlock)
- if isinstance(item, (TextContent, TextBlock)):
- text_parts.append(item.text)
- # Skip thinking content (custom ThinkingContent or Anthropic ThinkingBlock)
- elif isinstance(item, (ThinkingContent, ThinkingBlock)):
- continue
- return "\n".join(text_parts)
- else:
- return str(content) if content else ""
+ text_parts: list[str] = []
+ for item in content:
+ # Handle text content (custom TextContent or Anthropic TextBlock)
+ if isinstance(item, (TextContent, TextBlock)):
+ text_parts.append(item.text)
+ # Skip thinking content (custom ThinkingContent or Anthropic ThinkingBlock)
+ elif isinstance(item, (ThinkingContent, ThinkingBlock)):
+ continue
+ return "\n".join(text_parts)
def parse_timestamp(timestamp_str: str) -> Optional[datetime]:
@@ -125,7 +122,7 @@ def parse_slash_command(text: str) -> Optional[SlashCommandContent]:
try:
contents_json: Any = json.loads(contents_text)
if isinstance(contents_json, dict) and "text" in contents_json:
- text_dict = cast(Dict[str, Any], contents_json)
+ text_dict = cast(dict[str, Any], contents_json)
text_value = text_dict["text"]
command_contents = str(text_value)
else:
@@ -232,9 +229,9 @@ def parse_ide_notifications(text: str) -> Optional[IdeNotificationContent]:
Returns:
IdeNotificationContent if any tags found, None otherwise
"""
- opened_files: List[IdeOpenedFile] = []
- selections: List[IdeSelection] = []
- diagnostics: List[IdeDiagnostic] = []
+ opened_files: list[IdeOpenedFile] = []
+ selections: list[IdeSelection] = []
+ diagnostics: list[IdeDiagnostic] = []
remaining_text = text
# Pattern 1:
content
@@ -259,7 +256,7 @@ def parse_ide_notifications(text: str) -> Optional[IdeNotificationContent]:
if isinstance(parsed_diagnostics, list):
diagnostics.append(
IdeDiagnostic(
- diagnostics=cast(List[Dict[str, Any]], parsed_diagnostics)
+ diagnostics=cast(list[dict[str, Any]], parsed_diagnostics)
)
)
else:
@@ -288,7 +285,7 @@ def parse_ide_notifications(text: str) -> Optional[IdeNotificationContent]:
def parse_compacted_summary(
- content_list: List[ContentItem],
+ content_list: list[ContentItem],
) -> Optional[CompactedSummaryContent]:
"""Parse compacted session summary from content list.
@@ -351,7 +348,7 @@ def parse_user_memory(text: str) -> Optional[UserMemoryContent]:
def parse_user_message_content(
- content_list: List[ContentItem],
+ content_list: list[ContentItem],
) -> Optional[UserMessageContent]:
"""Parse user message content into a structured content model.
@@ -443,7 +440,7 @@ def is_bash_output(text_content: str) -> bool:
return "
" in text_content or "" in text_content
-def is_warmup_only_session(messages: List[TranscriptEntry], session_id: str) -> bool:
+def is_warmup_only_session(messages: list[TranscriptEntry], session_id: str) -> bool:
"""Check if a session contains only warmup user messages.
A warmup session is one where ALL user messages are literally just "Warmup".
@@ -456,7 +453,7 @@ def is_warmup_only_session(messages: List[TranscriptEntry], session_id: str) ->
Returns:
True if ALL user messages in the session are "Warmup", False otherwise
"""
- user_messages_in_session: List[str] = []
+ user_messages_in_session: list[str] = []
for message in messages:
if (
@@ -494,7 +491,7 @@ def is_assistant_entry(entry: TranscriptEntry) -> TypeGuard[AssistantTranscriptE
# Tool Input Parsing
# =============================================================================
-TOOL_INPUT_MODELS: Dict[str, type[BaseModel]] = {
+TOOL_INPUT_MODELS: dict[str, type[BaseModel]] = {
"Bash": BashInput,
"Read": ReadInput,
"Write": WriteInput,
@@ -515,10 +512,10 @@ def is_assistant_entry(entry: TranscriptEntry) -> TypeGuard[AssistantTranscriptE
# They use defaults for missing fields and skip invalid nested items.
-def _parse_todowrite_lenient(data: Dict[str, Any]) -> TodoWriteInput:
+def _parse_todowrite_lenient(data: dict[str, Any]) -> TodoWriteInput:
"""Parse TodoWrite input leniently, handling malformed data."""
todos_raw = data.get("todos", [])
- valid_todos: List[TodoWriteItem] = []
+ valid_todos: list[TodoWriteItem] = []
for item in todos_raw:
if isinstance(item, dict):
try:
@@ -530,7 +527,7 @@ def _parse_todowrite_lenient(data: Dict[str, Any]) -> TodoWriteInput:
return TodoWriteInput(todos=valid_todos)
-def _parse_bash_lenient(data: Dict[str, Any]) -> BashInput:
+def _parse_bash_lenient(data: dict[str, Any]) -> BashInput:
"""Parse Bash input leniently."""
return BashInput(
command=data.get("command", ""),
@@ -540,7 +537,7 @@ def _parse_bash_lenient(data: Dict[str, Any]) -> BashInput:
)
-def _parse_write_lenient(data: Dict[str, Any]) -> WriteInput:
+def _parse_write_lenient(data: dict[str, Any]) -> WriteInput:
"""Parse Write input leniently."""
return WriteInput(
file_path=data.get("file_path", ""),
@@ -548,7 +545,7 @@ def _parse_write_lenient(data: Dict[str, Any]) -> WriteInput:
)
-def _parse_edit_lenient(data: Dict[str, Any]) -> EditInput:
+def _parse_edit_lenient(data: dict[str, Any]) -> EditInput:
"""Parse Edit input leniently."""
return EditInput(
file_path=data.get("file_path", ""),
@@ -558,10 +555,10 @@ def _parse_edit_lenient(data: Dict[str, Any]) -> EditInput:
)
-def _parse_multiedit_lenient(data: Dict[str, Any]) -> MultiEditInput:
+def _parse_multiedit_lenient(data: dict[str, Any]) -> MultiEditInput:
"""Parse Multiedit input leniently."""
edits_raw = data.get("edits", [])
- valid_edits: List[EditItem] = []
+ valid_edits: list[EditItem] = []
for edit in edits_raw:
if isinstance(edit, dict):
try:
@@ -571,7 +568,7 @@ def _parse_multiedit_lenient(data: Dict[str, Any]) -> MultiEditInput:
return MultiEditInput(file_path=data.get("file_path", ""), edits=valid_edits)
-def _parse_task_lenient(data: Dict[str, Any]) -> TaskInput:
+def _parse_task_lenient(data: dict[str, Any]) -> TaskInput:
"""Parse Task input leniently."""
return TaskInput(
prompt=data.get("prompt", ""),
@@ -583,7 +580,7 @@ def _parse_task_lenient(data: Dict[str, Any]) -> TaskInput:
)
-def _parse_read_lenient(data: Dict[str, Any]) -> ReadInput:
+def _parse_read_lenient(data: dict[str, Any]) -> ReadInput:
"""Parse Read input leniently."""
return ReadInput(
file_path=data.get("file_path", ""),
@@ -592,17 +589,17 @@ def _parse_read_lenient(data: Dict[str, Any]) -> ReadInput:
)
-def _parse_askuserquestion_lenient(data: Dict[str, Any]) -> AskUserQuestionInput:
+def _parse_askuserquestion_lenient(data: dict[str, Any]) -> AskUserQuestionInput:
"""Parse AskUserQuestion input leniently, handling malformed data."""
questions_raw = data.get("questions", [])
- valid_questions: List[AskUserQuestionItem] = []
+ valid_questions: list[AskUserQuestionItem] = []
for q in questions_raw:
if isinstance(q, dict):
- q_dict = cast(Dict[str, Any], q)
+ q_dict = cast(dict[str, Any], q)
try:
# Parse options leniently
options_raw = q_dict.get("options", [])
- valid_options: List[AskUserQuestionOption] = []
+ valid_options: list[AskUserQuestionOption] = []
for opt in options_raw:
if isinstance(opt, dict):
try:
@@ -627,7 +624,7 @@ def _parse_askuserquestion_lenient(data: Dict[str, Any]) -> AskUserQuestionInput
)
-def _parse_exitplanmode_lenient(data: Dict[str, Any]) -> ExitPlanModeInput:
+def _parse_exitplanmode_lenient(data: dict[str, Any]) -> ExitPlanModeInput:
"""Parse ExitPlanMode input leniently."""
return ExitPlanModeInput(
plan=data.get("plan", ""),
@@ -637,7 +634,7 @@ def _parse_exitplanmode_lenient(data: Dict[str, Any]) -> ExitPlanModeInput:
# Mapping of tool names to their lenient parsers
-TOOL_LENIENT_PARSERS: Dict[str, Any] = {
+TOOL_LENIENT_PARSERS: dict[str, Any] = {
"Bash": _parse_bash_lenient,
"Write": _parse_write_lenient,
"Edit": _parse_edit_lenient,
@@ -651,7 +648,7 @@ def _parse_exitplanmode_lenient(data: Dict[str, Any]) -> ExitPlanModeInput:
}
-def parse_tool_input(tool_name: str, input_data: Dict[str, Any]) -> ToolInput:
+def parse_tool_input(tool_name: str, input_data: dict[str, Any]) -> ToolInput:
"""Parse tool input dictionary into a typed model.
Uses strict validation first, then lenient parsing if available.
@@ -729,7 +726,7 @@ def normalize_usage_info(usage_data: Any) -> Optional[UsageInfo]:
# to clarify which content types can appear in which context.
-def _parse_text_content(item_data: Dict[str, Any]) -> ContentItem:
+def _parse_text_content(item_data: dict[str, Any]) -> ContentItem:
"""Parse text content, trying Anthropic types first.
Common to both user and assistant messages.
@@ -740,7 +737,7 @@ def _parse_text_content(item_data: Dict[str, Any]) -> ContentItem:
return TextContent.model_validate(item_data)
-def parse_user_content_item(item_data: Dict[str, Any]) -> ContentItem:
+def parse_user_content_item(item_data: dict[str, Any]) -> ContentItem:
"""Parse a content item from a UserTranscriptEntry.
User messages can contain:
@@ -764,7 +761,7 @@ def parse_user_content_item(item_data: Dict[str, Any]) -> ContentItem:
return TextContent(type="text", text=str(item_data))
-def parse_assistant_content_item(item_data: Dict[str, Any]) -> ContentItem:
+def parse_assistant_content_item(item_data: dict[str, Any]) -> ContentItem:
"""Parse a content item from an AssistantTranscriptEntry.
Assistant messages can contain:
@@ -798,7 +795,7 @@ def parse_assistant_content_item(item_data: Dict[str, Any]) -> ContentItem:
return TextContent(type="text", text=str(item_data))
-def parse_content_item(item_data: Dict[str, Any]) -> ContentItem:
+def parse_content_item(item_data: dict[str, Any]) -> ContentItem:
"""Parse a content item (generic fallback).
For cases where the entry type is unknown. Handles all content types.
@@ -842,9 +839,12 @@ def parse_content_item(item_data: Dict[str, Any]) -> ContentItem:
def parse_message_content(
content_data: Any,
- item_parser: Callable[[Dict[str, Any]], ContentItem] = parse_content_item,
-) -> Union[str, List[ContentItem]]:
- """Parse message content, handling both string and list formats.
+ item_parser: Callable[[dict[str, Any]], ContentItem] = parse_content_item,
+) -> list[ContentItem]:
+ """Parse message content, normalizing to a list of ContentItems.
+
+ Always returns a list for consistent downstream handling. String content
+ is wrapped in a TextContent item.
Args:
content_data: Raw content data (string or list of items)
@@ -853,12 +853,19 @@ def parse_message_content(
parse_assistant_content_item for type-specific parsing.
"""
if isinstance(content_data, str):
- return content_data
+ return [TextContent(type="text", text=content_data)]
elif isinstance(content_data, list):
- content_list = cast(List[Dict[str, Any]], content_data)
- return [item_parser(item) for item in content_list]
+ content_list = cast(list[Any], content_data)
+ result: list[ContentItem] = []
+ for item in content_list:
+ if isinstance(item, dict):
+ result.append(item_parser(cast(dict[str, Any], item)))
+ else:
+ # Non-dict items (e.g., raw strings) become TextContent
+ result.append(TextContent(type="text", text=str(item)))
+ return result
else:
- return str(content_data)
+ return [TextContent(type="text", text=str(content_data))]
# =============================================================================
@@ -866,7 +873,7 @@ def parse_message_content(
# =============================================================================
-def parse_transcript_entry(data: Dict[str, Any]) -> TranscriptEntry:
+def parse_transcript_entry(data: dict[str, Any]) -> TranscriptEntry:
"""
Parse a JSON dictionary into the appropriate TranscriptEntry type.
@@ -897,14 +904,14 @@ def parse_transcript_entry(data: Dict[str, Any]) -> TranscriptEntry:
data_copy["toolUseResult"], list
):
# Check if it's a list of content items (MCP tool results)
- tool_use_result = cast(List[Any], data_copy["toolUseResult"])
+ tool_use_result = cast(list[Any], data_copy["toolUseResult"])
if (
tool_use_result
and isinstance(tool_use_result[0], dict)
and "type" in tool_use_result[0]
):
data_copy["toolUseResult"] = [
- parse_content_item(cast(Dict[str, Any], item))
+ parse_content_item(cast(dict[str, Any], item))
for item in tool_use_result
if isinstance(item, dict)
]
diff --git a/claude_code_log/renderer.py b/claude_code_log/renderer.py
index 5b375380..680d558e 100644
--- a/claude_code_log/renderer.py
+++ b/claude_code_log/renderer.py
@@ -4,7 +4,7 @@
import time
from dataclasses import dataclass, replace
from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
+from typing import Any, Optional, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from .cache import CacheManager
@@ -181,7 +181,7 @@ def __init__(
has_markdown: bool = False,
message_title: Optional[str] = None,
message_id: Optional[str] = None,
- ancestry: Optional[List[str]] = None,
+ ancestry: Optional[list[str]] = None,
has_children: bool = False,
uuid: Optional[str] = None,
parent_uuid: Optional[str] = None,
@@ -228,7 +228,7 @@ def __init__(
self.pair_role: Optional[str] = None # "pair_first", "pair_last", "pair_middle"
self.pair_duration: Optional[str] = None # Duration for pair_last messages
# Children for tree-based rendering (future use)
- self.children: List["TemplateMessage"] = []
+ self.children: list["TemplateMessage"] = []
def get_immediate_children_label(self) -> str:
"""Generate human-readable label for immediate children."""
@@ -238,26 +238,26 @@ def get_total_descendants_label(self) -> str:
"""Generate human-readable label for all descendants."""
return _format_type_counts(self.total_descendants_by_type)
- def flatten(self) -> List["TemplateMessage"]:
+ def flatten(self) -> list["TemplateMessage"]:
"""Recursively flatten this message and all children into a list.
Returns a list with this message followed by all descendants in
depth-first order. This provides backward compatibility with the
flat-list template rendering approach.
"""
- result: List["TemplateMessage"] = [self]
+ result: list["TemplateMessage"] = [self]
for child in self.children:
result.extend(child.flatten())
return result
@staticmethod
- def flatten_all(messages: List["TemplateMessage"]) -> List["TemplateMessage"]:
+ def flatten_all(messages: list["TemplateMessage"]) -> list["TemplateMessage"]:
"""Flatten a list of root messages into a single flat list.
Useful for converting a tree structure back to a flat list for
templates that expect the traditional flat message list.
"""
- result: List["TemplateMessage"] = []
+ result: list["TemplateMessage"] = []
for message in messages:
result.extend(message.flatten())
return result
@@ -266,7 +266,7 @@ def flatten_all(messages: List["TemplateMessage"]) -> List["TemplateMessage"]:
class TemplateProject:
"""Structured project data for template rendering."""
- def __init__(self, project_data: Dict[str, Any]):
+ def __init__(self, project_data: dict[str, Any]):
self.name = project_data["name"]
self.html_file = project_data["html_file"]
self.jsonl_count = project_data["jsonl_count"]
@@ -318,7 +318,7 @@ def __init__(self, project_data: Dict[str, Any]):
# Format token usage
self.token_summary = ""
if self.total_input_tokens > 0 or self.total_output_tokens > 0:
- token_parts: List[str] = []
+ token_parts: list[str] = []
if self.total_input_tokens > 0:
token_parts.append(f"Input: {self.total_input_tokens}")
if self.total_output_tokens > 0:
@@ -335,7 +335,7 @@ def __init__(self, project_data: Dict[str, Any]):
class TemplateSummary:
"""Summary statistics for template rendering."""
- def __init__(self, project_summaries: List[Dict[str, Any]]):
+ def __init__(self, project_summaries: list[dict[str, Any]]):
self.total_projects = len(project_summaries)
self.total_jsonl = sum(p["jsonl_count"] for p in project_summaries)
self.total_messages = sum(p["message_count"] for p in project_summaries)
@@ -400,7 +400,7 @@ def __init__(self, project_summaries: List[Dict[str, Any]]):
# Format token usage summary
self.token_summary = ""
if self.total_input_tokens > 0 or self.total_output_tokens > 0:
- token_parts: List[str] = []
+ token_parts: list[str] = []
if self.total_input_tokens > 0:
token_parts.append(f"Input: {self.total_input_tokens}")
if self.total_output_tokens > 0:
@@ -418,8 +418,8 @@ def __init__(self, project_summaries: List[Dict[str, Any]]):
def generate_template_messages(
- messages: List[TranscriptEntry],
-) -> Tuple[List[TemplateMessage], List[Dict[str, Any]]]:
+ messages: list[TranscriptEntry],
+) -> Tuple[list[TemplateMessage], list[dict[str, Any]]]:
"""Generate template messages and session navigation from transcript messages.
This is the format-neutral rendering step that produces data structures
@@ -515,14 +515,14 @@ def generate_template_messages(
# -- Session Utilities --------------------------------------------------------
-def prepare_session_summaries(messages: List[TranscriptEntry]) -> None:
+def prepare_session_summaries(messages: list[TranscriptEntry]) -> None:
"""Pre-process messages to find and attach session summaries.
Modifies messages in place by attaching _session_summary attribute.
"""
- session_summaries: Dict[str, str] = {}
- uuid_to_session: Dict[str, str] = {}
- uuid_to_session_backup: Dict[str, str] = {}
+ session_summaries: dict[str, str] = {}
+ uuid_to_session: dict[str, str] = {}
+ uuid_to_session_backup: dict[str, str] = {}
# Build mapping from message UUID to session ID
for message in messages:
@@ -558,9 +558,9 @@ def prepare_session_summaries(messages: List[TranscriptEntry]) -> None:
def prepare_session_navigation(
- sessions: Dict[str, Dict[str, Any]],
- session_order: List[str],
-) -> List[Dict[str, Any]]:
+ sessions: dict[str, dict[str, Any]],
+ session_order: list[str],
+) -> list[dict[str, Any]]:
"""Prepare session navigation data for template rendering.
Args:
@@ -570,7 +570,7 @@ def prepare_session_navigation(
Returns:
List of session navigation dicts for template rendering
"""
- session_nav: List[Dict[str, Any]] = []
+ session_nav: list[dict[str, Any]] = []
for session_id in session_order:
session_info = sessions[session_id]
@@ -592,7 +592,7 @@ def prepare_session_navigation(
total_cache_read = session_info["total_cache_read_tokens"]
if total_input > 0 or total_output > 0:
- token_parts: List[str] = []
+ token_parts: list[str] = []
if total_input > 0:
token_parts.append(f"Input: {total_input}")
if total_output > 0:
@@ -668,7 +668,7 @@ def _process_local_command_output(
# If parsing fails, content will be None and caller will handle fallback
message_type = "user"
- message_title = "Command Output"
+ message_title = "Command output"
return modifiers, content, message_type, message_title
@@ -683,7 +683,7 @@ def _process_bash_input(
# If parsing fails, content will be None and caller will handle fallback
message_type = "bash-input"
- message_title = "Bash"
+ message_title = "Bash command"
return modifiers, content, message_type, message_title
@@ -697,13 +697,13 @@ def _process_bash_output(
content = parse_bash_output(text_content)
# If parsing fails, content will be None - caller/renderer handles empty output
- message_type = "bash"
- message_title = "Bash"
+ message_type = "bash-output"
+ message_title = "Command output"
return modifiers, content, message_type, message_title
def _process_regular_message(
- text_only_content: List[ContentItem],
+ text_only_content: list[ContentItem],
message_type: str,
is_sidechain: bool,
is_meta: bool = False,
@@ -761,7 +761,7 @@ def _process_regular_message(
if is_sidechain:
# Update message title for display (only non-user types reach here)
if not is_compacted:
- message_title = "🔗 Sub-assistant"
+ message_title = "Sub-assistant"
modifiers = MessageModifiers(
is_sidechain=is_sidechain,
@@ -849,7 +849,7 @@ class ToolItemResult:
def _process_tool_use_item(
tool_item: ContentItem,
- tool_use_context: Dict[str, ToolUseContent],
+ tool_use_context: dict[str, ToolUseContent],
) -> Optional[ToolItemResult]:
"""Process a tool_use content item.
@@ -891,7 +891,7 @@ def _process_tool_use_item(
def _process_tool_result_item(
tool_item: ContentItem,
- tool_use_context: Dict[str, ToolUseContent],
+ tool_use_context: dict[str, ToolUseContent],
) -> Optional[ToolItemResult]:
"""Process a tool_result content item.
@@ -938,7 +938,7 @@ def _process_tool_result_item(
pending_dedup: Optional[str] = None
if result_tool_name == "Task":
# Extract text content from tool result
- # Note: tool_result.content can be str or List[Dict[str, Any]]
+ # Note: tool_result.content can be str or list[dict[str, Any]]
if isinstance(tool_result.content, str):
task_result_content = tool_result.content.strip()
else:
@@ -1019,24 +1019,24 @@ class PairingIndices:
"""
# (session_id, tool_use_id) -> message index for tool_use messages
- tool_use: Dict[tuple[str, str], int]
+ tool_use: dict[tuple[str, str], int]
# (session_id, tool_use_id) -> message index for tool_result messages
- tool_result: Dict[tuple[str, str], int]
+ tool_result: dict[tuple[str, str], int]
# uuid -> message index for system messages (parent-child pairing)
- uuid: Dict[str, int]
+ uuid: dict[str, int]
# parent_uuid -> message index for slash-command messages
- slash_command_by_parent: Dict[str, int]
+ slash_command_by_parent: dict[str, int]
-def _build_pairing_indices(messages: List[TemplateMessage]) -> PairingIndices:
+def _build_pairing_indices(messages: list[TemplateMessage]) -> PairingIndices:
"""Build indices for efficient message pairing lookups.
Single pass through messages to build all indices needed for pairing.
"""
- tool_use_index: Dict[tuple[str, str], int] = {}
- tool_result_index: Dict[tuple[str, str], int] = {}
- uuid_index: Dict[str, int] = {}
- slash_command_by_parent: Dict[str, int] = {}
+ tool_use_index: dict[tuple[str, str], int] = {}
+ tool_result_index: dict[tuple[str, str], int] = {}
+ uuid_index: dict[str, int] = {}
+ slash_command_by_parent: dict[str, int] = {}
for i, msg in enumerate(messages):
# Index tool_use and tool_result by (session_id, tool_use_id)
@@ -1104,7 +1104,7 @@ def _try_pair_adjacent(
def _try_pair_by_index(
current: TemplateMessage,
- messages: List[TemplateMessage],
+ messages: list[TemplateMessage],
indices: PairingIndices,
) -> None:
"""Try to pair current message with another using index lookups.
@@ -1134,7 +1134,7 @@ def _try_pair_by_index(
_mark_pair(current, slash_msg)
-def _identify_message_pairs(messages: List[TemplateMessage]) -> None:
+def _identify_message_pairs(messages: list[TemplateMessage]) -> None:
"""Identify and mark paired messages (e.g., command + output, tool use + result).
Modifies messages in-place by setting is_paired and pair_role fields.
@@ -1173,7 +1173,7 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None:
i += 1
-def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMessage]:
+def _reorder_paired_messages(messages: list[TemplateMessage]) -> list[TemplateMessage]:
"""Reorder messages so paired messages are adjacent while preserving chronological order.
- Unpaired messages and first messages in pairs maintain chronological order
@@ -1189,11 +1189,11 @@ def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMe
# Build index of pair_last messages by (session_id, tool_use_id)
# Session ID is included to prevent cross-session pairing when sessions are resumed
- pair_last_index: Dict[
+ pair_last_index: dict[
tuple[str, str], int
] = {} # (session_id, tool_use_id) -> message index
# Index slash-command pair_last messages by parent_uuid
- slash_command_pair_index: Dict[str, int] = {} # parent_uuid -> message index
+ slash_command_pair_index: dict[str, int] = {} # parent_uuid -> message index
for i, msg in enumerate(messages):
if (
@@ -1214,7 +1214,7 @@ def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMe
slash_command_pair_index[msg.parent_uuid] = i
# Create reordered list
- reordered: List[TemplateMessage] = []
+ reordered: list[TemplateMessage] = []
skip_indices: set[int] = set()
for i, msg in enumerate(messages):
@@ -1343,7 +1343,7 @@ def _get_message_hierarchy_level(msg: TemplateMessage) -> int:
return 1
-def _build_message_hierarchy(messages: List[TemplateMessage]) -> None:
+def _build_message_hierarchy(messages: list[TemplateMessage]) -> None:
"""Build message_id and ancestry for all messages based on their current order.
This should be called after all reordering operations (pair reordering, sidechain
@@ -1355,7 +1355,7 @@ def _build_message_hierarchy(messages: List[TemplateMessage]) -> None:
Args:
messages: List of template messages in their final order (modified in place)
"""
- hierarchy_stack: List[tuple[int, str]] = []
+ hierarchy_stack: list[tuple[int, str]] = []
message_id_counter = 0
for message in messages:
@@ -1389,7 +1389,7 @@ def _build_message_hierarchy(messages: List[TemplateMessage]) -> None:
message.ancestry = ancestry
-def _mark_messages_with_children(messages: List[TemplateMessage]) -> None:
+def _mark_messages_with_children(messages: list[TemplateMessage]) -> None:
"""Mark messages that have children and calculate descendant counts.
Efficiently calculates:
@@ -1445,7 +1445,7 @@ def _mark_messages_with_children(messages: List[TemplateMessage]) -> None:
)
-def _build_message_tree(messages: List[TemplateMessage]) -> List[TemplateMessage]:
+def _build_message_tree(messages: list[TemplateMessage]) -> list[TemplateMessage]:
"""Build tree structure by populating children fields based on ancestry.
This function takes a flat list of messages (with message_id and ancestry
@@ -1475,7 +1475,7 @@ def _build_message_tree(messages: List[TemplateMessage]) -> List[TemplateMessage
message.children = []
# Collect root messages (those with no ancestry)
- root_messages: List[TemplateMessage] = []
+ root_messages: list[TemplateMessage] = []
# Populate children based on ancestry
for message in messages:
@@ -1496,8 +1496,8 @@ def _build_message_tree(messages: List[TemplateMessage]) -> List[TemplateMessage
def _reorder_session_template_messages(
- messages: List[TemplateMessage],
-) -> List[TemplateMessage]:
+ messages: list[TemplateMessage],
+) -> list[TemplateMessage]:
"""Reorder template messages to group all messages under their correct session headers.
When a user resumes session A into session B, Claude Code copies messages from
@@ -1516,8 +1516,8 @@ def _reorder_session_template_messages(
Reordered messages with all messages grouped under their session headers
"""
# First pass: extract session headers and group non-header messages by session_id
- session_headers: List[TemplateMessage] = []
- session_messages_map: Dict[str, List[TemplateMessage]] = {}
+ session_headers: list[TemplateMessage] = []
+ session_messages_map: dict[str, list[TemplateMessage]] = {}
for message in messages:
if message.is_session_header:
@@ -1537,7 +1537,7 @@ def _reorder_session_template_messages(
return messages
# Second pass: for each session header, insert all messages with that session_id
- result: List[TemplateMessage] = []
+ result: list[TemplateMessage] = []
used_sessions: set[str] = set()
for header in session_headers:
@@ -1558,8 +1558,8 @@ def _reorder_session_template_messages(
def _reorder_sidechain_template_messages(
- messages: List[TemplateMessage],
-) -> List[TemplateMessage]:
+ messages: list[TemplateMessage],
+) -> list[TemplateMessage]:
"""Reorder template messages to place sidechains immediately after their Task results.
When parallel Task agents run, their sidechain messages may appear in arbitrary
@@ -1581,8 +1581,8 @@ def _reorder_sidechain_template_messages(
Reordered messages with sidechains properly placed after their Task results
"""
# First pass: extract sidechains grouped by agent_id
- main_messages: List[TemplateMessage] = []
- sidechain_map: Dict[str, List[TemplateMessage]] = {}
+ main_messages: list[TemplateMessage] = []
+ sidechain_map: dict[str, list[TemplateMessage]] = {}
for message in messages:
is_sidechain = message.modifiers.is_sidechain
@@ -1602,7 +1602,7 @@ def _reorder_sidechain_template_messages(
# Second pass: insert sidechains after their Task result messages
# Also perform deduplication of sidechain assistants vs Task results
- result: List[TemplateMessage] = []
+ result: list[TemplateMessage] = []
used_agents: set[str] = set()
for message in main_messages:
@@ -1662,7 +1662,7 @@ def _reorder_sidechain_template_messages(
return result
-def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
+def _filter_messages(messages: list[TranscriptEntry]) -> list[TranscriptEntry]:
"""Filter messages to those that should be rendered.
This function filters out:
@@ -1680,7 +1680,7 @@ def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
Returns:
Filtered list of messages that should be rendered
"""
- filtered: List[TranscriptEntry] = []
+ filtered: list[TranscriptEntry] = []
for message in messages:
message_type = message.type
@@ -1700,28 +1700,25 @@ def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
continue
# Get message content for filtering checks
+ message_content: list[ContentItem]
if isinstance(message, QueueOperationTranscriptEntry):
- message_content = message.content if message.content else []
+ content = message.content
+ message_content = content if isinstance(content, list) else []
else:
- message_content = message.message.content # type: ignore
+ message_content = message.message.content # type: ignore[union-attr]
text_content = extract_text_content(message_content)
# Skip if no meaningful content
if not text_content.strip():
# Check for tool items
- if isinstance(message_content, list):
- has_tool_items = any(
- isinstance(
- item, (ToolUseContent, ToolResultContent, ThinkingContent)
- )
- or getattr(item, "type", None)
- in ("tool_use", "tool_result", "thinking")
- for item in message_content
- )
- if not has_tool_items:
- continue
- else:
+ has_tool_items = any(
+ isinstance(item, (ToolUseContent, ToolResultContent, ThinkingContent))
+ or getattr(item, "type", None)
+ in ("tool_use", "tool_result", "thinking")
+ for item in message_content
+ )
+ if not has_tool_items:
continue
# Skip messages that should be filtered out
@@ -1730,14 +1727,13 @@ def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
# Skip sidechain user messages that are just prompts (no tool results)
if message_type == MessageType.USER and getattr(message, "isSidechain", False):
- if isinstance(message_content, list):
- has_tool_results = any(
- getattr(item, "type", None) == "tool_result"
- or isinstance(item, ToolResultContent)
- for item in message_content
- )
- if not has_tool_results:
- continue
+ has_tool_results = any(
+ getattr(item, "type", None) == "tool_result"
+ or isinstance(item, ToolResultContent)
+ for item in message_content
+ )
+ if not has_tool_results:
+ continue
# Message passes all filters
filtered.append(message)
@@ -1746,10 +1742,10 @@ def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]:
def _collect_session_info(
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
) -> tuple[
- Dict[str, Dict[str, Any]], # sessions
- List[str], # session_order
+ dict[str, dict[str, Any]], # sessions
+ list[str], # session_order
set[str], # show_tokens_for_message
]:
"""Collect session metadata and token tracking from pre-filtered messages.
@@ -1771,8 +1767,8 @@ def _collect_session_info(
- session_order: List of session IDs in chronological order
- show_tokens_for_message: Set of message UUIDs that should display tokens
"""
- sessions: Dict[str, Dict[str, Any]] = {}
- session_order: List[str] = []
+ sessions: dict[str, dict[str, Any]] = {}
+ session_order: list[str] = []
# Track requestIds to avoid double-counting token usage
seen_request_ids: set[str] = set()
@@ -1867,10 +1863,10 @@ def _collect_session_info(
def _render_messages(
- messages: List[TranscriptEntry],
- sessions: Dict[str, Dict[str, Any]],
+ messages: list[TranscriptEntry],
+ sessions: dict[str, dict[str, Any]],
show_tokens_for_message: set[str],
-) -> List[TemplateMessage]:
+) -> list[TemplateMessage]:
"""Pass 2: Render pre-filtered messages to TemplateMessage objects.
This pass creates the actual TemplateMessage objects for rendering:
@@ -1894,19 +1890,19 @@ def _render_messages(
seen_sessions: set[str] = set()
# Build mapping of tool_use_id to ToolUseContent for specialized tool result rendering
- tool_use_context: Dict[str, ToolUseContent] = {}
+ tool_use_context: dict[str, ToolUseContent] = {}
# Process messages into template-friendly format
- template_messages: List[TemplateMessage] = []
+ template_messages: list[TemplateMessage] = []
# Per-message timing tracking
- message_timings: List[
+ message_timings: list[
tuple[float, str, int, str]
] = [] # (duration, message_type, index, uuid)
# Track expensive operations
- markdown_timings: List[tuple[float, str]] = [] # (duration, context_uuid)
- pygments_timings: List[tuple[float, str]] = [] # (duration, context_uuid)
+ markdown_timings: list[tuple[float, str]] = [] # (duration, context_uuid)
+ pygments_timings: list[tuple[float, str]] = [] # (duration, context_uuid)
# Initialize timing tracking
set_timing_var("_markdown_timings", markdown_timings)
@@ -1938,11 +1934,11 @@ def _render_messages(
text_content = extract_text_content(message_content) # type: ignore[arg-type]
# Separate tool/thinking/image content from text content
- tool_items: List[ContentItem] = []
- text_only_content: List[ContentItem] = []
+ tool_items: list[ContentItem] = []
+ text_only_content: list[ContentItem] = []
if isinstance(message_content, list):
- text_only_items: List[ContentItem] = []
+ text_only_items: list[ContentItem] = []
for item in message_content: # type: ignore[union-attr]
item_type = getattr(item, "type", None) # type: ignore[arg-type]
is_image = isinstance(item, ImageContent) or item_type == "image"
@@ -2199,8 +2195,8 @@ def _render_messages(
def prepare_projects_index(
- project_summaries: List[Dict[str, Any]],
-) -> tuple[List["TemplateProject"], "TemplateSummary"]:
+ project_summaries: list[dict[str, Any]],
+) -> tuple[list["TemplateProject"], "TemplateSummary"]:
"""Prepare project data for rendering in any format.
Args:
@@ -2222,7 +2218,7 @@ def prepare_projects_index(
def title_for_projects_index(
- project_summaries: List[Dict[str, Any]],
+ project_summaries: list[dict[str, Any]],
from_date: Optional[str] = None,
to_date: Optional[str] = None,
) -> str:
@@ -2286,7 +2282,7 @@ def title_for_projects_index(
# Add date range suffix if provided
if from_date or to_date:
- date_range_parts: List[str] = []
+ date_range_parts: list[str] = []
if from_date:
date_range_parts.append(f"from {from_date}")
if to_date:
@@ -2308,7 +2304,7 @@ class Renderer:
def generate(
self,
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
title: Optional[str] = None,
combined_transcript_link: Optional[str] = None,
) -> Optional[str]:
@@ -2320,7 +2316,7 @@ def generate(
def generate_session(
self,
- messages: List[TranscriptEntry],
+ messages: list[TranscriptEntry],
session_id: str,
title: Optional[str] = None,
cache_manager: Optional["CacheManager"] = None,
@@ -2333,7 +2329,7 @@ def generate_session(
def generate_projects_index(
self,
- project_summaries: List[Dict[str, Any]],
+ project_summaries: list[dict[str, Any]],
from_date: Optional[str] = None,
to_date: Optional[str] = None,
) -> Optional[str]:
diff --git a/claude_code_log/renderer_timings.py b/claude_code_log/renderer_timings.py
index fb5111cb..96aff4b7 100644
--- a/claude_code_log/renderer_timings.py
+++ b/claude_code_log/renderer_timings.py
@@ -7,7 +7,7 @@
import os
import time
from contextlib import contextmanager
-from typing import List, Tuple, Iterator, Any, Dict, Callable, Union, Optional
+from typing import Tuple, Iterator, Any, Callable, Union, Optional
# Performance debugging - enabled via CLAUDE_CODE_LOG_DEBUG_TIMING environment variable
# Set to "1", "true", or "yes" to enable timing output
@@ -18,7 +18,7 @@
)
# Global timing data storage
-_timing_data: Dict[str, Any] = {}
+_timing_data: dict[str, Any] = {}
def set_timing_var(name: str, value: Any) -> None:
@@ -111,8 +111,8 @@ def timing_stat(list_name: str) -> Iterator[None]:
def report_timing_statistics(
- message_timings: List[Tuple[float, str, int, str]],
- operation_timings: List[Tuple[str, List[Tuple[float, str]]]],
+ message_timings: list[Tuple[float, str, int, str]],
+ operation_timings: list[Tuple[str, list[Tuple[float, str]]]],
) -> None:
"""Report timing statistics for message rendering.
diff --git a/claude_code_log/tui.py b/claude_code_log/tui.py
index 861a9db0..f3a69b5b 100644
--- a/claude_code_log/tui.py
+++ b/claude_code_log/tui.py
@@ -5,7 +5,7 @@
import webbrowser
from datetime import datetime
from pathlib import Path
-from typing import ClassVar, Dict, Optional, cast
+from typing import ClassVar, Optional, cast
from textual.app import App, ComposeResult
from textual.binding import Binding, BindingType
@@ -230,7 +230,7 @@ class SessionBrowser(App[Optional[str]]):
is_expanded: reactive[bool] = reactive(False)
project_path: Path
cache_manager: CacheManager
- sessions: Dict[str, SessionCacheData]
+ sessions: dict[str, SessionCacheData]
def __init__(self, project_path: Path):
"""Initialize the session browser with a project path."""
diff --git a/claude_code_log/utils.py b/claude_code_log/utils.py
index 82f3a733..e0ad25fd 100644
--- a/claude_code_log/utils.py
+++ b/claude_code_log/utils.py
@@ -2,9 +2,9 @@
"""Utility functions for message filtering and processing."""
import re
-from datetime import datetime
+from datetime import datetime, timezone
from pathlib import Path
-from typing import Dict, List, Optional, Union
+from typing import Optional
from claude_code_log.cache import SessionCacheData
from .models import ContentItem, TextContent, TranscriptEntry, UserTranscriptEntry
@@ -26,15 +26,7 @@ def format_timestamp(timestamp_str: str | None) -> str:
dt = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00"))
# Convert to UTC if timezone-aware
if dt.tzinfo is not None:
- utc_timetuple = dt.utctimetuple()
- dt = datetime(
- utc_timetuple.tm_year,
- utc_timetuple.tm_mon,
- utc_timetuple.tm_mday,
- utc_timetuple.tm_hour,
- utc_timetuple.tm_min,
- utc_timetuple.tm_sec,
- )
+ dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
return dt.strftime("%Y-%m-%d %H:%M:%S")
except (ValueError, AttributeError):
return timestamp_str
@@ -63,7 +55,7 @@ def format_timestamp_range(first_timestamp: str, last_timestamp: str) -> str:
def get_project_display_name(
- project_dir_name: str, working_directories: Optional[List[str]] = None
+ project_dir_name: str, working_directories: Optional[list[str]] = None
) -> str:
"""Get the display name for a project based on working directories.
@@ -164,24 +156,19 @@ def create_session_preview(text_content: str) -> str:
return preview_content
-def extract_text_content_length(content: Union[str, List[ContentItem]]) -> int:
+def extract_text_content_length(content: list[ContentItem]) -> int:
"""Get the length of text content for quick checks without full extraction."""
- if isinstance(content, str):
- return len(content.strip())
-
- # For list content, count only text items
total_length = 0
for item in content:
# Only count TextContent items, skip tool/thinking/image items
if isinstance(item, TextContent):
total_length += len(item.text.strip())
-
return total_length
def extract_working_directories(
- entries: List[TranscriptEntry] | List[SessionCacheData],
-) -> List[str]:
+ entries: list[TranscriptEntry] | list[SessionCacheData],
+) -> list[str]:
"""Extract unique working directories from a list of entries.
Ordered by timestamp (most recent first).
@@ -314,7 +301,7 @@ def _extract_file_path(content: str) -> str | None:
return text_content
-def get_warmup_session_ids(messages: List[TranscriptEntry]) -> set[str]:
+def get_warmup_session_ids(messages: list[TranscriptEntry]) -> set[str]:
"""Get set of session IDs that are warmup-only sessions.
Pre-computes warmup status for all sessions for efficiency (O(n) once,
@@ -329,7 +316,7 @@ def get_warmup_session_ids(messages: List[TranscriptEntry]) -> set[str]:
from .parser import extract_text_content
# Group user message text by session
- session_user_messages: Dict[str, List[str]] = {}
+ session_user_messages: dict[str, list[str]] = {}
for message in messages:
if isinstance(message, UserTranscriptEntry) and hasattr(message, "message"):
diff --git a/dev-docs/FOLD_STATE_DIAGRAM.md b/dev-docs/FOLD_STATE_DIAGRAM.md
index a8af2c2d..9ac5b106 100644
--- a/dev-docs/FOLD_STATE_DIAGRAM.md
+++ b/dev-docs/FOLD_STATE_DIAGRAM.md
@@ -273,6 +273,6 @@ Paired messages (tool_use + tool_result, thinking + assistant) are handled as un
## References
-- [renderer.py](../claude_code_log/renderer.py) - Hierarchy functions (lines 2698-2850)
-- [templates/components/fold_bar.html](../claude_code_log/templates/components/fold_bar.html) - JavaScript controls
-- [css-classes.md](css-classes.md) - CSS class documentation
+- [renderer.py](../claude_code_log/renderer.py) - Message hierarchy functions (lines 1285-1493)
+- [transcript.html](../claude_code_log/html/templates/transcript.html) - Fold/unfold JavaScript controls
+- [message_styles.css](../claude_code_log/html/templates/components/message_styles.css) - Fold state CSS styles
diff --git a/dev-docs/MESSAGE_REFACTORING.md b/dev-docs/MESSAGE_REFACTORING.md
index 230f5856..90881e40 100755
--- a/dev-docs/MESSAGE_REFACTORING.md
+++ b/dev-docs/MESSAGE_REFACTORING.md
@@ -51,7 +51,7 @@ This branch implements tree-based message rendering. See [TEMPLATE_MESSAGE_CHILD
- Template unchanged - still receives flat list (Phase 3 future work)
**Architecture:**
-```
+```text
TranscriptEntry[] → generate_template_messages() → root_messages (tree)
↓
HtmlRenderer._flatten_preorder() → flat_list
@@ -138,7 +138,7 @@ Adds text/markdown/chat output formats via new `content_extractor.py` module.
**Goal**: Extract ANSI color conversion to dedicated module
**Changes**:
-- ✅ Created `claude_code_log/ansi_colors.py` (261 lines)
+- ✅ Created `claude_code_log/html/ansi_colors.py` (261 lines)
- ✅ Moved `_convert_ansi_to_html()` → `convert_ansi_to_html()`
- ✅ Updated imports in `renderer.py`
- ✅ Updated test imports in `test_ansi_colors.py`
@@ -150,7 +150,7 @@ Adds text/markdown/chat output formats via new `content_extractor.py` module.
**Goal**: Extract code-related rendering (Pygments highlighting, diff rendering) to dedicated module
**Changes**:
-- ✅ Created `claude_code_log/renderer_code.py` (330 lines)
+- ✅ Created `claude_code_log/html/renderer_code.py` (330 lines)
- ✅ Moved `_highlight_code_with_pygments()` → `highlight_code_with_pygments()`
- ✅ Moved `_truncate_highlighted_preview()` → `truncate_highlighted_preview()`
- ✅ Moved `_render_single_diff()` → `render_single_diff()`
@@ -392,10 +392,7 @@ The original plan called for two-stage (parse + render) splits. This was achieve
- **Tree-first architecture** means HtmlRenderer traverses tree and formats during pre-order walk
**Step 9 Status**:
-`generate_projects_index_html()` remains in renderer.py because:
-- Mixes format-neutral data preparation (TemplateProject/TemplateSummary) with HTML generation
-- Moving just the HTML part would require restructuring the data flow
-- Low priority: function works correctly and is ~100 lines
+`generate_projects_index_html()` is now in `claude_code_log/html/renderer.py` (thin wrapper over `HtmlRenderer.generate_projects_index()`).
**Dependencies**:
- Requires Phase 9 (type safety) for clean interfaces ✅
diff --git a/dev-docs/messages.md b/dev-docs/messages.md
index ce455b47..ee76bc65 100644
--- a/dev-docs/messages.md
+++ b/dev-docs/messages.md
@@ -16,7 +16,7 @@ This document maps input types to their intermediate and output representations.
## Data Flow: From Transcript Entries to Rendered Messages
-```
+```text
JSONL Parsing (parser.py)
│
├── UserTranscriptEntry
@@ -168,6 +168,11 @@ Based on flags and tag patterns in `TextContent`, user text messages are classif
}
```
+> **Note**: These are "caveat" messages that precede slash command messages (with
+> `<command-name>` tags). They instruct the LLM to not respond to the following
+> local command output unless explicitly asked. The actual slash command details
+> appear in the subsequent message with `<command-name>` tags.
+
### Slash Command (Tags)
- **Condition**: Contains `<command-name>` tags
@@ -183,6 +188,11 @@ class SlashCommandContent(MessageContent):
command_contents: str # Content inside command
```
+> **Note**: Both built-in commands (e.g., `/init`, `/model`, `/context`) and
+> user-defined commands (e.g., `/my-command` from `~/.claude/commands/my-command.md`)
+> use the same `<command-name>` tag format. There is no field in the JSONL to
+> differentiate between them.
+
### Command Output
- **Condition**: Contains `<local-command-stdout>` tags
@@ -201,7 +211,7 @@ class CommandOutputContent(MessageContent):
- **Condition**: Contains `<bash-input>` tags
- **Content Model**: `BashInputContent`
-- **CSS Class**: Part of bash tool pairing
+- **CSS Class**: `bash-input` (filtered by User)
- **Files**: [bash_input.json](messages/user/bash_input.json)
```python
@@ -216,7 +226,7 @@ The corresponding output uses `<bash-stdout>` and optionally `<bash-stderr>` tag
- **Condition**: Contains `<bash-stdout>` tags
- **Content Model**: `BashOutputContent`
-- **CSS Class**: Part of bash tool pairing
+- **CSS Class**: `bash-output` (filtered by User)
- **Files**: [bash_output.json](messages/user/bash_output.json)
### Compacted Conversation
@@ -701,7 +711,7 @@ The message hierarchy is determined by **sequence and message type**, not by `pa
- Tool use/result pairs nest under assistant responses (Level 3)
- Sidechain messages nest under their Task result (Level 4+)
-```
+```text
Session header (Level 0)
└── User message (Level 1)
├── System message (Level 2)
diff --git a/test/__snapshots__/test_snapshot_html.ambr b/test/__snapshots__/test_snapshot_html.ambr
index c7d4aab1..018f4fa1 100644
--- a/test/__snapshots__/test_snapshot_html.ambr
+++ b/test/__snapshots__/test_snapshot_html.ambr
@@ -4316,7 +4316,11 @@
'thinking': { id: 'thinking', content: '💭 Thinking', style: 'background-color: #fce4ec;' },
'system': { id: 'system', content: '⚙️ System', style: 'background-color: #ffeee1;' },
'image': { id: 'image', content: '🖼️ Image', style: 'background-color: #e1f5fe;' },
- 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' }
+ 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' },
+ 'slash-command': { id: 'slash-command', content: '⌨️ Slash Command', style: 'background-color: #e8eaf6;' },
+ 'command-output': { id: 'command-output', content: '📋 Command Output', style: 'background-color: #efebe9;' },
+ 'bash-input': { id: 'bash-input', content: '💻 Bash Input', style: 'background-color: #e8eaf6;' },
+ 'bash-output': { id: 'bash-output', content: '📄 Bash Output', style: 'background-color: #efebe9;' }
};
// Build timeline data from messages
@@ -4342,6 +4346,14 @@
messageType = 'sidechain';
} else if (classList.includes('system-warning') || classList.includes('system-error') || classList.includes('system-info')) {
messageType = 'system';
+ } else if (classList.includes('slash-command')) {
+ messageType = 'slash-command';
+ } else if (classList.includes('command-output')) {
+ messageType = 'command-output';
+ } else if (classList.includes('bash-input')) {
+ messageType = 'bash-input';
+ } else if (classList.includes('bash-output')) {
+ messageType = 'bash-output';
} else {
// Look for standard message types
messageType = classList.find(cls =>
@@ -4466,10 +4478,15 @@
const activeTypes = Array.from(document.querySelectorAll('.filter-toggle.active'))
.map(toggle => toggle.dataset.type);
+ // Get all filter toggle types (to know which groups have filter controls)
+ const allFilterTypes = Array.from(document.querySelectorAll('.filter-toggle'))
+ .map(toggle => toggle.dataset.type);
+
// Update groups visibility based on filter states
+ // Groups with filter toggles follow toggle state; groups without toggles stay visible
const updatedGroups = groups.map(group => ({
...group,
- visible: activeTypes.includes(group.id)
+ visible: allFilterTypes.includes(group.id) ? activeTypes.includes(group.id) : true
}));
// Update timeline groups
@@ -4547,7 +4564,7 @@
axis: 2
},
groupOrder: (a, b) => {
- const order = ['user', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'thinking', 'system', 'image'];
+ const order = ['user', 'system', 'slash-command', 'command-output', 'bash-input', 'bash-output', 'thinking', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'image'];
return order.indexOf(a.id) - order.indexOf(b.id);
}
};
@@ -5359,7 +5376,7 @@
// Count messages by type and update button labels
function updateMessageCounts() {
- const messageTypes = ['user', 'assistant', 'sidechain', 'system', 'thinking', 'image'];
+ const messageTypes = ['assistant', 'sidechain', 'system', 'thinking', 'image'];
messageTypes.forEach(type => {
const messages = document.querySelectorAll(`.message.${type}:not(.session-header)`);
@@ -5379,8 +5396,23 @@
}
});
- // Handle combined "tool" filter (tool_use + tool_result + bash messages)
- const toolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ // Handle combined "user" filter (user + bash-input + bash-output)
+ const userMessages = document.querySelectorAll(`.message.user:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ const userCount = userMessages.length;
+ const userToggle = document.querySelector(`[data-type="user"]`);
+ const userCountSpan = userToggle ? userToggle.querySelector('.count') : null;
+
+ if (userCountSpan) {
+ userCountSpan.textContent = `(${userCount})`;
+ if (userCount === 0) {
+ userToggle.style.display = 'none';
+ } else {
+ userToggle.style.display = 'flex';
+ }
+ }
+
+ // Handle combined "tool" filter (tool_use + tool_result)
+ const toolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header)`);
const toolCount = toolMessages.length;
const toolToggle = document.querySelector(`[data-type="tool"]`);
const toolCountSpan = toolToggle ? toolToggle.querySelector('.count') : null;
@@ -5401,11 +5433,14 @@
.filter(toggle => toggle.classList.contains('active'))
.map(toggle => toggle.dataset.type);
- // Expand "tool" to include tool_use, tool_result, and bash messages
+ // Expand filter types to their corresponding CSS classes
const expandedTypes = [];
activeTypes.forEach(type => {
if (type === 'tool') {
- expandedTypes.push('tool_use', 'tool_result', 'bash-input', 'bash-output');
+ expandedTypes.push('tool_use', 'tool_result');
+ } else if (type === 'user') {
+ // User filter includes bash commands (user-initiated)
+ expandedTypes.push('user', 'bash-input', 'bash-output');
} else {
expandedTypes.push(type);
}
@@ -5449,7 +5484,7 @@
}
function updateVisibleCounts() {
- const messageTypes = ['user', 'assistant', 'sidechain', 'system', 'thinking', 'image'];
+ const messageTypes = ['assistant', 'sidechain', 'system', 'thinking', 'image'];
messageTypes.forEach(type => {
const visibleMessages = document.querySelectorAll(`.message.${type}:not(.session-header):not(.filtered-hidden)`);
@@ -5476,9 +5511,32 @@
}
});
- // Handle combined "tool" filter separately (includes bash messages)
- const visibleToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header):not(.filtered-hidden), .message.tool_result:not(.session-header):not(.filtered-hidden), .message.bash-input:not(.session-header):not(.filtered-hidden), .message.bash-output:not(.session-header):not(.filtered-hidden)`);
- const totalToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ // Handle combined "user" filter separately (includes bash messages)
+ const visibleUserMessages = document.querySelectorAll(`.message.user:not(.session-header):not(.filtered-hidden), .message.bash-input:not(.session-header):not(.filtered-hidden), .message.bash-output:not(.session-header):not(.filtered-hidden)`);
+ const totalUserMessages = document.querySelectorAll(`.message.user:not(.session-header), .message.bash-input:not(.session-header), .message.bash-output:not(.session-header)`);
+ const visibleUserCount = visibleUserMessages.length;
+ const totalUserCount = totalUserMessages.length;
+
+ const userToggle = document.querySelector(`[data-type="user"]`);
+ const userCountSpan = userToggle ? userToggle.querySelector('.count') : null;
+
+ if (userCountSpan && totalUserCount > 0) {
+ const activeTypes = Array.from(filterToggles)
+ .filter(toggle => toggle.classList.contains('active'))
+ .map(toggle => toggle.dataset.type);
+
+ const isFiltering = activeTypes.length < filterToggles.length;
+
+ if (isFiltering && visibleUserCount !== totalUserCount) {
+ userCountSpan.textContent = `(${visibleUserCount}/${totalUserCount})`;
+ } else {
+ userCountSpan.textContent = `(${totalUserCount})`;
+ }
+ }
+
+ // Handle combined "tool" filter separately
+ const visibleToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header):not(.filtered-hidden), .message.tool_result:not(.session-header):not(.filtered-hidden)`);
+ const totalToolMessages = document.querySelectorAll(`.message.tool_use:not(.session-header), .message.tool_result:not(.session-header)`);
const visibleToolCount = visibleToolMessages.length;
const totalToolCount = totalToolMessages.length;
@@ -9052,7 +9110,11 @@
'thinking': { id: 'thinking', content: '💭 Thinking', style: 'background-color: #fce4ec;' },
'system': { id: 'system', content: '⚙️ System', style: 'background-color: #ffeee1;' },
'image': { id: 'image', content: '🖼️ Image', style: 'background-color: #e1f5fe;' },
- 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' }
+ 'sidechain': { id: 'sidechain', content: '🔗 Sub-assistant', style: 'background-color: #f5f5f5;' },
+ 'slash-command': { id: 'slash-command', content: '⌨️ Slash Command', style: 'background-color: #e8eaf6;' },
+ 'command-output': { id: 'command-output', content: '📋 Command Output', style: 'background-color: #efebe9;' },
+ 'bash-input': { id: 'bash-input', content: '💻 Bash Input', style: 'background-color: #e8eaf6;' },
+ 'bash-output': { id: 'bash-output', content: '📄 Bash Output', style: 'background-color: #efebe9;' }
};
// Build timeline data from messages
@@ -9078,6 +9140,14 @@
messageType = 'sidechain';
} else if (classList.includes('system-warning') || classList.includes('system-error') || classList.includes('system-info')) {
messageType = 'system';
+ } else if (classList.includes('slash-command')) {
+ messageType = 'slash-command';
+ } else if (classList.includes('command-output')) {
+ messageType = 'command-output';
+ } else if (classList.includes('bash-input')) {
+ messageType = 'bash-input';
+ } else if (classList.includes('bash-output')) {
+ messageType = 'bash-output';
} else {
// Look for standard message types
messageType = classList.find(cls =>
@@ -9202,10 +9272,15 @@
const activeTypes = Array.from(document.querySelectorAll('.filter-toggle.active'))
.map(toggle => toggle.dataset.type);
+ // Get all filter toggle types (to know which groups have filter controls)
+ const allFilterTypes = Array.from(document.querySelectorAll('.filter-toggle'))
+ .map(toggle => toggle.dataset.type);
+
// Update groups visibility based on filter states
+ // Groups with filter toggles follow toggle state; groups without toggles stay visible
const updatedGroups = groups.map(group => ({
...group,
- visible: activeTypes.includes(group.id)
+ visible: allFilterTypes.includes(group.id) ? activeTypes.includes(group.id) : true
}));
// Update timeline groups
@@ -9283,7 +9358,7 @@
axis: 2
},
groupOrder: (a, b) => {
- const order = ['user', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'thinking', 'system', 'image'];
+ const order = ['user', 'system', 'slash-command', 'command-output', 'bash-input', 'bash-output', 'thinking', 'assistant', 'sidechain', 'tool_use', 'tool_result', 'image'];
return order.indexOf(a.id) - order.indexOf(b.id);
}
};
@@ -9759,7 +9834,7 @@