llama multi gpu rocm #1686

Workflow file for this run

name: CodeQL

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  schedule:
    - cron: '42 20 * * 4'

jobs:
  analyze:
    name: Analyze

    strategy:
      fail-fast: false
      matrix:
        language: [ 'cpp' ]

    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true

    runs-on: ubuntu-latest
    container:
      image: ghcr.io/microsoft/ark/ark:base-dev-cuda12.1

    permissions:
      actions: read
      contents: read
      security-events: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check disk space
        run: |
          df -h

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}

      - name: Dubious ownership exception
        run: |
          git config --global --add safe.directory /__w/ark/ark

      - name: Build
        run: |
          cmake -DBYPASS_GPU_CHECK=ON -DUSE_CUDA=ON .
          make -j build

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
        with:
          category: "/language:${{matrix.language}}"