Skip to content

fix(landing-page): correct zone percentage thresholds to match code #260

fix(landing-page): correct zone percentage thresholds to match code

fix(landing-page): correct zone percentage thresholds to match code #260

Workflow file for this run

---
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Cancel any in-flight run for the same workflow + ref when a new one starts,
# so superseded pushes don't waste CI minutes.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  # ============================================
  # PYTHON SCRIPT - LINT AND TEST
  # ============================================
  # Fast static checks run first; every other job depends (directly or
  # transitively) on this one so broken style/types fail the pipeline early.
  python-lint:
    name: Python Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: 'pip'
          cache-dependency-path: 'requirements-dev.txt'
      - name: Install dependencies
        run: pip install -r requirements-dev.txt
      - name: Run Ruff (linting)
        run: ruff check scripts/statusline.py
      - name: Run Ruff (formatting check)
        run: ruff format --check scripts/statusline.py
      - name: Run MyPy (type checking)
        run: mypy scripts/statusline.py --ignore-missing-imports
python-test:
name: Python Tests (${{ matrix.os }}, Python ${{ matrix.python-version }})
needs: python-lint
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ['3.9', '3.10', '3.11', '3.12']
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
- name: Install dependencies
run: |
pip install -r requirements-dev.txt
pip install -e .
- name: Run pytest with coverage
shell: bash
run: |
# Run pytest with coverage. On Windows, pytest-cov can exit with code 1
# after all tests pass due to a known interaction between the coverage
# C extension tracer and Windows process shutdown. We capture the result,
# and on Windows only, treat exit code 1 as success when no tests failed.
set +e
pytest tests/python/ -v --cov=scripts --cov=src --cov-report=xml --cov-report=term 2>&1 | tee pytest_output.txt
PYTEST_EXIT=${PIPESTATUS[0]}
set -e
if [ "$RUNNER_OS" = "Windows" ] && [ "$PYTEST_EXIT" -eq 1 ]; then
if grep -qE "^(FAILED|ERROR)" pytest_output.txt; then
echo "Tests actually failed on Windows"
exit 1
else
echo "Exit code 1 on Windows with no test failures — treating as success"
exit 0
fi
fi
exit $PYTEST_EXIT
- name: Upload coverage to Codecov
if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
uses: codecov/codecov-action@v4
with:
files: ./coverage.xml
flags: python
fail_ci_if_error: false
# ============================================
# INTEGRATION TESTS
# ============================================
integration-test:
name: Integration Tests (${{ matrix.os }})
needs: python-test
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Test Python script produces non-empty output
run: |
SAMPLE_INPUT='{"model":{"display_name":"Claude 3.5 Sonnet"},"workspace":{"current_dir":"/tmp/test","project_dir":"/tmp/test"},"context_window":{"context_window_size":200000,"current_usage":{"input_tokens":1000,"cache_creation_input_tokens":500,"cache_read_input_tokens":200}}}'
echo "Testing Python script..."
echo "$SAMPLE_INPUT" | python3 ./scripts/statusline.py
echo "All integration tests passed!"
# ============================================
# END-TO-END TESTS
# ============================================
e2e-install-pip:
name: E2E Install pip (${{ matrix.os }}, Python ${{ matrix.python-version }})
needs: [python-lint]
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
python-version: ['3.9', '3.10', '3.11', '3.12']
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install via pip
run: pip install .
- name: Verify claude-statusline command is available
run: |
which claude-statusline
echo "✓ claude-statusline is on PATH"
- name: Verify context-stats command is available
run: |
which context-stats
echo "✓ context-stats is on PATH"
- name: Verify claude-statusline produces output
run: |
SAMPLE_INPUT='{"model":{"display_name":"Claude 3.5 Sonnet"},"workspace":{"current_dir":"/tmp/test","project_dir":"/tmp/test"},"context_window":{"context_window_size":200000,"current_usage":{"input_tokens":1000,"cache_creation_input_tokens":500,"cache_read_input_tokens":200}}}'
OUTPUT=$(echo "$SAMPLE_INPUT" | claude-statusline)
test -n "$OUTPUT"
echo "✓ claude-statusline produces non-empty output via pip install"
echo " Output: $OUTPUT"
e2e-exec:
name: E2E Exec (${{ matrix.os }})
needs: [python-lint]
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Test Python script produces non-empty output
run: |
SAMPLE_INPUT='{"model":{"display_name":"Claude 3.5 Sonnet"},"workspace":{"current_dir":"/tmp/test","project_dir":"/tmp/test"},"context_window":{"context_window_size":200000,"current_usage":{"input_tokens":1000,"cache_creation_input_tokens":500,"cache_read_input_tokens":200}}}'
echo "=== Python script ==="
OUTPUT=$(echo "$SAMPLE_INPUT" | python3 ./scripts/statusline.py)
test -n "$OUTPUT" || { echo "✗ statusline.py produced empty output"; exit 1; }
echo "✓ statusline.py: $OUTPUT"
echo ""
echo "All E2E execution tests passed!"
- name: Test with 1M model input
run: |
SAMPLE_1M='{"model":{"display_name":"Claude Opus 4.6"},"workspace":{"current_dir":"/tmp/test","project_dir":"/tmp/test"},"context_window":{"context_window_size":1000000,"current_usage":{"input_tokens":50000,"cache_creation_input_tokens":10000,"cache_read_input_tokens":5000}}}'
echo "=== 1M model context ==="
OUTPUT=$(echo "$SAMPLE_1M" | python3 ./scripts/statusline.py)
test -n "$OUTPUT" || { echo "✗ statusline.py (1M) produced empty output"; exit 1; }
echo "✓ statusline.py (1M): $OUTPUT"
- name: Test with edge case inputs
run: |
echo "=== Empty/zero context ==="
ZERO_CTX='{"model":{"display_name":"Test"},"workspace":{"current_dir":"/tmp","project_dir":"/tmp"},"context_window":{"context_window_size":0,"current_usage":{"input_tokens":0,"cache_creation_input_tokens":0,"cache_read_input_tokens":0}}}'
OUTPUT=$(echo "$ZERO_CTX" | python3 ./scripts/statusline.py)
test -n "$OUTPUT" || { echo "✗ statusline.py (zero ctx) produced empty output"; exit 1; }
echo "✓ statusline.py handles zero context"
echo "=== High utilization ==="
HIGH_USE='{"model":{"display_name":"Claude 3.5 Sonnet"},"workspace":{"current_dir":"/tmp","project_dir":"/tmp"},"context_window":{"context_window_size":200000,"current_usage":{"input_tokens":180000,"cache_creation_input_tokens":0,"cache_read_input_tokens":0}}}'
OUTPUT=$(echo "$HIGH_USE" | python3 ./scripts/statusline.py)
test -n "$OUTPUT" || { echo "✗ statusline.py (high use) produced empty output"; exit 1; }
echo "✓ statusline.py handles high utilization"
# ============================================
# FINAL STATUS CHECK
# ============================================
ci-success:
name: CI Success
needs: [python-lint, python-test, integration-test, e2e-install-pip, e2e-exec]
runs-on: ubuntu-latest
if: always()
steps:
- name: Check all jobs passed
run: |
if [[ "${{ needs.python-lint.result }}" != "success" ]] || \
[[ "${{ needs.python-test.result }}" != "success" ]] || \
[[ "${{ needs.integration-test.result }}" != "success" ]] || \
[[ "${{ needs.e2e-install-pip.result }}" != "success" ]] || \
[[ "${{ needs.e2e-exec.result }}" != "success" ]]; then
echo "One or more jobs failed"
exit 1
fi
echo "All CI jobs passed successfully!"