# chore(main): release 0.0.23 (#162)
# Workflow file for this run (copied from the GitHub Actions UI).
name: CI

# Run the full test/lint/e2e pipeline on pushes and PRs targeting the
# main branches. NOTE: `on` is a YAML 1.1 boolean-ish key; GitHub's loader
# handles it, so suppress yamllint `truthy` warnings for this line if linting.
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.26'
          cache: true
      # ccache persists compiled CGO objects (sqlite3, tree-sitter) between
      # runs, cutting rebuild time substantially.
      - name: Setup ccache
        uses: hendrikmuhs/ccache-action@v1.2
        with:
          key: ${{ runner.os }}-go-cgo
          max-size: 500M
      - name: Test
        run: |
          # Use ccache to speed up repeated CGO (C) compilation of sqlite3/tree-sitter.
          export CC="ccache gcc"
          export CXX="ccache g++"
          # Print a heartbeat every 60 s so GitHub Actions does not kill the
          # runner during the silent CGO (sqlite3) compilation phase.
          (while true; do echo "[ci] tests still running…"; sleep 60; done) &
          HEARTBEAT=$!
          trap "kill $HEARTBEAT 2>/dev/null || true" EXIT
          CGO_ENABLED=1 go test -p 1 -tags=fts5 -timeout=30m \
            -coverprofile=coverage.out -covermode=atomic ./...
      - name: Upload coverage to Coveralls
        uses: coverallsapp/github-action@v2
        with:
          file: coverage.out
          format: golang
      - name: Script tests
        run: bash scripts/test_run.sh
      - name: Vet
        run: go vet -tags=fts5 ./...

  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.26'
          # golangci-lint-action manages its own cache; disable setup-go's
          # to avoid double-caching conflicts.
          cache: false
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v8
        with:
          version: latest
          args: --build-tags=fts5

  e2e:
    name: E2E
    runs-on: ubuntu-latest
    services:
      ollama:
        image: ollama/ollama:latest
        ports:
          # Quoted: bare digits-and-colons parse as a sexagesimal integer
          # under YAML 1.1 rules.
          - "11434:11434"
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.26'
          cache: true
      # Poll the service container for up to 30 s; fail the job explicitly
      # if it never becomes ready, instead of letting later steps fail
      # with an opaque connection error.
      - name: Wait for Ollama
        run: |
          for i in $(seq 1 30); do
            curl -sf http://localhost:11434/ && exit 0
            sleep 1
          done
          echo "Ollama did not become ready within 30 s" >&2
          exit 1
      - name: Pull embedding model
        run: |
          curl -sf http://localhost:11434/api/pull -d '{"name":"all-minilm"}'
      - name: E2E tests
        run: make e2e
        env:
          OLLAMA_HOST: http://localhost:11434
          LUMEN_EMBED_MODEL: all-minilm
          # Quoted so the value stays a string, not an int.
          LUMEN_EMBED_DIMS: '384'
      - name: E2E lang tests
        run: make e2e-lang
        env:
          OLLAMA_HOST: http://localhost:11434
          LUMEN_EMBED_MODEL: all-minilm
          LUMEN_EMBED_DIMS: '384'