diff --git a/.github/actions/build_container/action.yml b/.github/actions/build_container/action.yml deleted file mode 100644 index 10a91ff..0000000 --- a/.github/actions/build_container/action.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Builds the container and chroots -description: | - This Github Actions builds the container and sets up the chroot environment for building packages for noble and questing. - -inputs: - - arch: - description: The architecture to build for. Ex amd64, arm64, etc - required: true - - push-to-ghcr: - description: Whether to push the built image to GitHub Container Registry - required: true - default: "true" - - token: - description: PAT token - required: true - - username: - description: Username for the PAT token - required: true - -runs: - using: "composite" - - steps: - - - name: Build noble and questing containers - shell: bash - run: ./scripts/docker_deb_build.py --rebuild - -# - name: Checkout pkg-example at debian/latest and tags -# uses: actions/checkout@v4 -# with: -# repository: qualcomm-linux/pkg-example -# ref: debian/latest -# token: ${{inputs.token}} -# path: ./pkg-example -# fetch-depth: 0 -# fetch-tags: true - -# - name: Build example package for noble -# shell: bash -# run: | -# mkdir -p build_noble -# ./scripts/docker_deb_build.py --source-dir pkg-example --output-dir build_noble --distro noble - -# - name: Build example package for questing -# shell: bash -# run: | -# mkdir -p build_questing -# ./scripts/docker_deb_build.py --source-dir pkg-example --output-dir build_questing --distro questing - - # Don't push to GHCR if build was triggered by a PR - - name: Push to GHCR - if: ${{inputs.push-to-ghcr}} == 'true' - shell: bash - run: | - echo ${{inputs.token}} | docker login ghcr.io -u ${{inputs.username}} --password-stdin - docker push ghcr.io/${{env.QCOM_ORG_NAME}}/${{env.IMAGE_NAME}}:${{inputs.arch}}-noble - docker push ghcr.io/${{env.QCOM_ORG_NAME}}/${{env.IMAGE_NAME}}:${{inputs.arch}}-questing - - diff --git 
a/.github/workflows/qcom-build-pkg-reusable-workflow.yml b/.github/workflows/qcom-build-pkg-reusable-workflow.yml index 74e148b..e6b1308 100644 --- a/.github/workflows/qcom-build-pkg-reusable-workflow.yml +++ b/.github/workflows/qcom-build-pkg-reusable-workflow.yml @@ -13,10 +13,10 @@ on: required: true debian-ref: - description: The debian ref to build. For example branch "debian/latest" or tag "debian/1.0.0-1" + description: The debian ref to build. For example branch "debian/qcom-next" or tag "debian/1.0.0-1" type: string required: true - default: debian/latest + default: debian/qcom-next distro-codename: description: The distribution codename to build for. Ex noble, jammy, etc @@ -34,7 +34,7 @@ on: default: false is-post-merge: - description: true if this build has been triggered by a merge into debian/latest. Will ensure PR branch is deleted + description: true if this build has been triggered by a merge into debian/qcom-next. Will ensure PR branch is deleted type: boolean default: false @@ -65,6 +65,7 @@ jobs: shell: bash container: + # This docker image is built and published by the qualcomm-linux/docker_deb_build repo CI workflow image: ghcr.io/qualcomm-linux/pkg-builder:${{inputs.runner == 'ubuntu-latest' && 'amd64' || 'arm64'}}-${{inputs.distro-codename}} options: --privileged credentials: diff --git a/.github/workflows/qcom-container-build-and-upload.yml b/.github/workflows/qcom-container-build-and-upload.yml deleted file mode 100644 index 68f3566..0000000 --- a/.github/workflows/qcom-container-build-and-upload.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: Container Build And Upload -description: | - Builds and uploads to GHCR (GitHub Container Registry) the container used to build the Qualcomm debian packages. - This workflow will assumes the build architecture is amd64 (x86_64) since the github's 'ubuntu-latest' runs-on tag - is used. Using docker's buildx, the Dockerfile in this repo's docker/ folder will be built for amd64 and cross-compiled - for arm64. 
- -on: - schedule: - # Runs at 00:00 UTC every Monday - - cron: '0 0 * * 1' - - pull_request_target: - branches: - - main - paths: - - '.github/workflows/qcom-container-build-and-upload.yml' - - 'docker/**' - - push: - branches: - - main - paths: - - '.github/workflows/qcom-container-build-and-upload.yml' - - 'docker/**' - - workflow_dispatch: - -permissions: - contents: read - -env: - QCOM_ORG_NAME: "qualcomm-linux" - - IMAGE_NAME: "pkg-builder" - -jobs: - - # Build the amd64 natively on ubuntu-latest which is an x86_64 host - build-image-amd64: - runs-on: ubuntu-latest - steps: - - - name: Checkout qcom-build-utils - uses: actions/checkout@v4 - with: - ref: ${{github.head_ref}} - - - name: Build Images - uses: ./.github/actions/build_container - with: - arch: amd64 - push-to-ghcr: ${{ github.event_name != 'pull_request_target' }} - token: ${{ secrets.DEB_PKG_BOT_CI_TOKEN }} - username: ${{ vars.DEB_PKG_BOT_CI_USERNAME }} - - # Build the arm64 natively using a self-hosted runner on an arm64 host - # Cross compiling the image using buildx on an x86_64 host was tried but failed due to - # issues with qemu and multiarch support in docker buildx. 
- build-image-arm64: - runs-on: ["self-hosted", "lecore-prd-u2404-arm64-xlrg-od-ephem"] - steps: - - name: Checkout Dockerfile - uses: actions/checkout@v4 - with: - ref: ${{github.head_ref}} - - - name: Build Images - uses: ./.github/actions/build_container - with: - arch: arm64 - push-to-ghcr: ${{ github.event_name != 'pull_request_target' }} - token: ${{ secrets.DEB_PKG_BOT_CI_TOKEN }} - username: ${{ vars.DEB_PKG_BOT_CI_USERNAME }} \ No newline at end of file diff --git a/.github/workflows/qcom-promote-upstream-reusable-workflow.yml b/.github/workflows/qcom-promote-upstream-reusable-workflow.yml index 97405ec..61d687e 100644 --- a/.github/workflows/qcom-promote-upstream-reusable-workflow.yml +++ b/.github/workflows/qcom-promote-upstream-reusable-workflow.yml @@ -54,6 +54,7 @@ jobs: shell: bash container: + # This docker image is built and published by the qualcomm-linux/docker_deb_build repo CI workflow image: ghcr.io/qualcomm-linux/pkg-builder:amd64-noble credentials: username: ${{ vars.DEB_PKG_BOT_CI_USERNAME }} diff --git a/.github/workflows/qcom-release-reusable-workflow.yml b/.github/workflows/qcom-release-reusable-workflow.yml index bf36592..9ab1a5d 100644 --- a/.github/workflows/qcom-release-reusable-workflow.yml +++ b/.github/workflows/qcom-release-reusable-workflow.yml @@ -55,6 +55,7 @@ jobs: shell: bash container: + # This docker image is built and published by the qualcomm-linux/docker_deb_build repo CI workflow image: ghcr.io/qualcomm-linux/pkg-builder:${{inputs.runner == 'ubuntu-latest' && 'amd64' || 'arm64'}}-${{inputs.distro-codename}} options: --privileged credentials: diff --git a/.github/workflows/qcom-upstream-pr-pkg-build-reusable-workflow.yml b/.github/workflows/qcom-upstream-pr-pkg-build-reusable-workflow.yml index 2cde0c0..b057e90 100644 --- a/.github/workflows/qcom-upstream-pr-pkg-build-reusable-workflow.yml +++ b/.github/workflows/qcom-upstream-pr-pkg-build-reusable-workflow.yml @@ -68,6 +68,7 @@ jobs: shell: bash container: + # This 
docker image is built and published by the qualcomm-linux/docker_deb_build repo CI workflow image: ghcr.io/qualcomm-linux/pkg-builder:${{inputs.runner == 'ubuntu-latest' && 'amd64' || 'arm64'}}-${{inputs.distro-codename}} options: --privileged credentials: @@ -125,7 +126,7 @@ jobs: git remote add upstream-source ../upstream-repo git fetch upstream-source "+refs/tags/*:refs/tags/*" - - name: Merge Upstream PR Changes to Packaging Repo on top of debian/latest + - name: Merge Upstream PR Changes to Packaging Repo on top of debian/qcom-next run: | cd ./package-repo @@ -133,7 +134,7 @@ jobs: git config user.email "${{vars.DEB_PKG_BOT_CI_EMAIL}}" git checkout upstream/latest - git checkout debian/latest + git checkout debian/qcom-next git checkout -b debian/upstream-pr version=$(dpkg-parsechangelog --show-field Version) @@ -148,9 +149,9 @@ jobs: echo "upstream_version=$upstream_version" >> $GITHUB_ENV echo "distro_revision=$distro_revision" >> $GITHUB_ENV - # Run gbp with --no-merge, this is because we do not want gbp to perform the final merging of upstream/latest into debian/latest + # Run gbp with --no-merge, this is because we do not want gbp to perform the final merging of upstream/latest into debian/qcom-next # This because in upstream/latest, the top commit is the one of the filtering of git, github and debian folder. gbp will complain - the merge is unseccessful because we want to merge with debian/latest, which has a workflow in .github, resulting in a merge conflict + the merge is unsuccessful because we want to merge with debian/qcom-next, which has a workflow in .github, resulting in a merge conflict # where in one branch we deleted .github folder, and the other we add files. We will therefore take the matter in our own hands manually after. 
gbp import-orig \ @@ -171,8 +172,8 @@ jobs: git merge \ --allow-unrelated-histories \ --signoff \ - -m "Merge upstream/pr into debian/latest" \ - -m "Filtered out .git, .github and debian from upstram, and preserved .github debian/latest" \ + -m "Merge upstream/pr into debian/qcom-next" \ + -m "Filtered out .git, .github and debian from upstram, and preserved .github debian/qcom-next" \ upstream/latest - name: Promote Changelog diff --git a/docker/Dockerfile.amd64.noble b/docker/Dockerfile.amd64.noble deleted file mode 100644 index 4e03f85..0000000 --- a/docker/Dockerfile.amd64.noble +++ /dev/null @@ -1,70 +0,0 @@ -# This dockerfile builds is assumed to be built by an amd64 machine FOR an amd64 machine, which will contain -# two chroots (noble and questing) to cross compile debian packages for arm64. - -# Use an official Ubuntu base image -FROM ubuntu:24.04 - -# Prevent interactive prompts during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update package list and install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - git \ - git-buildpackage \ - sbuild \ - schroot \ - ubuntu-dev-tools \ - gh \ - jq \ - debootstrap \ - tree \ - build-essential \ - debhelper \ - abigail-tools \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -COPY extra-packages.txt /tmp/extra-packages.txt - -# Build the noble chroot -RUN EXTRA_PACKAGES=$(tr -s '[:space:]' ',' < /tmp/extra-packages.txt) && \ - sbuild-createchroot --include="$EXTRA_PACKAGES" --arch=amd64 --components=main,universe noble /srv/chroot/noble http://archive.ubuntu.com/ubuntu && \ - chroot /srv/chroot/noble /bin/bash -c "\ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu noble main restricted universe multiverse' > /etc/apt/sources.list && \ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu noble-updates main restricted universe multiverse' >> /etc/apt/sources.list && \ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu noble-security main restricted 
universe multiverse' >> /etc/apt/sources.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports noble main restricted universe multiverse' > /etc/apt/sources.list.d/arm64-ports.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports noble-updates main restricted universe multiverse' >> /etc/apt/sources.list.d/arm64-ports.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports noble-security main restricted universe multiverse' >> /etc/apt/sources.list.d/arm64-ports.list && \ - dpkg --add-architecture arm64 && \ - apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y crossbuild-essential-arm64 \ - gcc-13-base:arm64 \ - gcc-14-base:arm64 \ - libasan8:arm64 \ - libatomic1:arm64 \ - libc6:arm64 \ - libc6-dev:arm64 \ - libcrypt-dev:arm64 \ - libcrypt1:arm64 \ - libgcc-13-dev:arm64 \ - libgcc-s1:arm64 \ - libgomp1:arm64 \ - libhwasan0:arm64 \ - libitm1:arm64 \ - liblsan0:arm64 \ - libstdc++-13-dev:arm64 \ - libstdc++6:arm64 \ - libtsan2:arm64 \ - libubsan1:arm64 \ - linux-libc-dev:arm64 \ - lintian:amd64" - -# Set working directory -WORKDIR /workspace - -# Default command -CMD [ "bash" ] \ No newline at end of file diff --git a/docker/Dockerfile.amd64.questing b/docker/Dockerfile.amd64.questing deleted file mode 100644 index 851a48e..0000000 --- a/docker/Dockerfile.amd64.questing +++ /dev/null @@ -1,71 +0,0 @@ -# This dockerfile builds is assumed to be built by an amd64 machine FOR an amd64 machine, which will contain -# two chroots (noble and questing) to cross compile debian packages for arm64. 
- -# Use an official Ubuntu base image -FROM ubuntu:25.10 - -# Prevent interactive prompts during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update package list and install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - git \ - git-buildpackage \ - sbuild \ - schroot \ - ubuntu-dev-tools \ - gh \ - jq \ - debootstrap \ - tree \ - build-essential \ - debhelper \ - abigail-tools \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -COPY extra-packages.txt /tmp/extra-packages.txt - -# Build the questing chroot -RUN EXTRA_PACKAGES=$(tr -s '[:space:]' ',' < /tmp/extra-packages.txt) && \ - sbuild-createchroot --include="$EXTRA_PACKAGES" --arch=amd64 --components=main,universe questing /srv/chroot/questing http://archive.ubuntu.com/ubuntu && \ - chroot /srv/chroot/questing /bin/bash -c "\ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu questing main restricted universe multiverse' > /etc/apt/sources.list && \ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu questing-updates main restricted universe multiverse' >> /etc/apt/sources.list && \ - echo 'deb [arch=amd64] http://archive.ubuntu.com/ubuntu questing-security main restricted universe multiverse' >> /etc/apt/sources.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports questing main restricted universe multiverse' > /etc/apt/sources.list.d/arm64-ports.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports questing-updates main restricted universe multiverse' >> /etc/apt/sources.list.d/arm64-ports.list && \ - echo 'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports questing-security main restricted universe multiverse' >> /etc/apt/sources.list.d/arm64-ports.list && \ - dpkg --add-architecture arm64 && \ - apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y crossbuild-essential-arm64 \ - gcc-13-base:arm64 \ - gcc-14-base:arm64 \ - libasan8:arm64 \ - libatomic1:arm64 \ - libc6:arm64 \ - libc6-dev:arm64 \ 
- libcrypt-dev:arm64 \ - libcrypt1:arm64 \ - libgcc-13-dev:arm64 \ - libgcc-s1:arm64 \ - libgomp1:arm64 \ - libhwasan0:arm64 \ - libitm1:arm64 \ - liblsan0:arm64 \ - libstdc++-13-dev:arm64 \ - libstdc++6:arm64 \ - libtsan2:arm64 \ - libubsan1:arm64 \ - linux-libc-dev:arm64 \ - lintian:amd64 && \ - apt-get install --reinstall apt" - -# Set working directory -WORKDIR /workspace - -# Default command -CMD [ "bash" ] \ No newline at end of file diff --git a/docker/Dockerfile.arm64.noble b/docker/Dockerfile.arm64.noble deleted file mode 100644 index 702bfb1..0000000 --- a/docker/Dockerfile.arm64.noble +++ /dev/null @@ -1,36 +0,0 @@ -# This dockerfile builds is assumed to be built by an arm64 machine FOR an arm64 machine, which will contain -# two chroots (noble and questing) to natively build for arm64. - -# Use an official Ubuntu base image -FROM ubuntu:24.04 - -# Prevent interactive prompts during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update package list and install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - git \ - git-buildpackage \ - sbuild \ - schroot \ - gh \ - jq \ - debootstrap \ - tree \ - build-essential \ - debhelper \ - abigail-tools \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -COPY extra-packages.txt /tmp/extra-packages.txt - -RUN EXTRA_PACKAGES=$(tr -s '[:space:]' ',' < /tmp/extra-packages.txt) && \ - sbuild-createchroot --include="$EXTRA_PACKAGES" --arch=arm64 --components=main,universe noble /srv/chroot/noble http://ports.ubuntu.com - -# Set working directory -WORKDIR /workspace - -# Default command -CMD [ "bash" ] \ No newline at end of file diff --git a/docker/Dockerfile.arm64.questing b/docker/Dockerfile.arm64.questing deleted file mode 100644 index aa66c02..0000000 --- a/docker/Dockerfile.arm64.questing +++ /dev/null @@ -1,36 +0,0 @@ -# This dockerfile builds is assumed to be built by an arm64 machine FOR an arm64 machine, which will contain -# two chroots (noble and questing) to natively 
build for arm64. - -# Use an official Ubuntu base image -FROM ubuntu:25.10 - -# Prevent interactive prompts during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update package list and install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - git \ - git-buildpackage \ - sbuild \ - schroot \ - gh \ - jq \ - debootstrap \ - tree \ - build-essential \ - debhelper \ - abigail-tools \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -COPY extra-packages.txt /tmp/extra-packages.txt - -RUN EXTRA_PACKAGES=$(tr -s '[:space:]' ',' < /tmp/extra-packages.txt) && \ - sbuild-createchroot --include="$EXTRA_PACKAGES" --arch=arm64 --components=main,universe questing /srv/chroot/questing http://ports.ubuntu.com - -# Set working directory -WORKDIR /workspace - -# Default command -CMD [ "bash" ] \ No newline at end of file diff --git a/docker/extra-packages.txt b/docker/extra-packages.txt deleted file mode 100644 index 24714de..0000000 --- a/docker/extra-packages.txt +++ /dev/null @@ -1,5 +0,0 @@ -autoconf automake autopoint autotools-dev bsdextrautils debhelper debugedit -dh-autoreconf dh-strip-nondeterminism dwz file gettext gettext-base -groff-base intltool-debian libarchive-zip-perl libdebhelper-perl libdw1t64 -libelf1t64 libfile-stripnondeterminism-perl libmagic-mgc -libmagic1t64 libpipeline1 libsub-override-perl libtool libuchardet0 m4 man-db po-debconf diff --git a/docs/README.md b/docs/README.md index fc4836a..f807d8e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -29,7 +29,7 @@ For package repository maintainers looking to use these workflows: - **PKG_REPO_GITHUB_NAME Variable**: A repository variable set in the upstream repository that links it to its associated package repository - **Reusable Workflows**: Centralized workflow definitions in qcom-build-utils that are called from package repositories - **Composite Actions**: Modular steps that perform specific tasks like building packages or checking ABI compatibility -- **Debian 
Branches**: Git branches following the `debian/` prefix convention (e.g., `debian/latest`, `debian/1.0.0-1`) +- **Debian Branches**: Git branches following the `debian/` prefix convention (e.g., `debian/qcom-next`, `debian/1.0.0-1`) - **Upstream Branches**: Git branches following the `upstream/` prefix convention for tracking upstream source code ### Repository Linking diff --git a/docs/package-repo-integration.md b/docs/package-repo-integration.md index 032230a..ce2e4ff 100644 --- a/docs/package-repo-integration.md +++ b/docs/package-repo-integration.md @@ -48,16 +48,16 @@ Package repositories use a specific branch structure: ```mermaid gitGraph commit id: "Initial" - branch debian/latest + branch debian/qcom-next commit id: "Debian packaging" branch upstream/latest commit id: "Upstream v1.0.0" - checkout debian/latest + checkout debian/qcom-next merge upstream/latest tag: "Merge upstream" commit id: "Update changelog" branch debian/pr/1.1.0-1 commit id: "Promote to 1.1.0" - checkout debian/latest + checkout debian/qcom-next merge debian/pr/1.1.0-1 commit id: "debian/1.1.0-1" tag: "debian/1.1.0-1" ``` @@ -67,7 +67,7 @@ gitGraph | Branch | Purpose | Protected | |--------|---------|-----------| | `main` | Primary development branch | Yes | -| `debian/latest` | Latest Debian packaging branch | Yes | +| `debian/qcom-next` | Latest Debian packaging branch | Yes | | `debian/` | Version-specific branches (created from tags) | No | | `debian/pr/` | PR branches for version promotions | No | | `upstream/latest` | Latest upstream source (non-native packages) | No | @@ -105,7 +105,7 @@ If you prefer to set up a repository manually instead of using the template: gh repo create qualcomm-linux/pkg-mypackage ``` -2. **Set up branch protection** for `debian/latest`: +2. 
**Set up branch protection** for `debian/qcom-next`: - Require pull request reviews - Require status checks to pass - Require branches to be up to date @@ -205,7 +205,7 @@ description: | on: pull_request_target: - branches: [ debian/latest ] + branches: [ debian/qcom-next ] permissions: contents: read @@ -230,11 +230,11 @@ jobs: ```yaml name: Post-Merge description: | - Test that debian/latest builds after the merge. Once the build passes, push the new version to the staging repo + Test that debian/qcom-next builds after the merge. Once the build passes, push the new version to the staging repo on: push: - branches: [ debian/latest ] + branches: [ debian/qcom-next ] permissions: contents: read @@ -245,7 +245,7 @@ jobs: uses: qualcomm-linux/qcom-build-utils/.github/workflows/qcom-build-pkg-reusable-workflow.yml@development with: qcom-build-utils-ref: development - debian-ref: debian/latest + debian-ref: debian/qcom-next push-to-repo: true run-abi-checker: true is-post-merge: true @@ -258,9 +258,9 @@ jobs: > **Note**: If you created your repository from pkg-template, the initial structure is already in place. You should customize the files and then commit your changes. -1. Create `debian/latest` branch: +1. Create `debian/qcom-next` branch: ```bash - git checkout -b debian/latest + git checkout -b debian/qcom-next ``` 2. Commit all files: @@ -271,7 +271,7 @@ jobs: 3. Push to remote: ```bash - git push origin debian/latest + git push origin debian/qcom-next ``` ## Workflow Usage @@ -282,11 +282,11 @@ The typical workflow for making changes: ```mermaid flowchart TD - A[Create feature branch
from debian/latest] --> B[Make changes to
debian/ or src/] + A[Create feature branch
from debian/qcom-next] --> B[Make changes to
debian/ or src/] B --> C[Update debian/changelog] C --> D[Commit changes] D --> E[Push branch] - E --> F[Open Pull Request
to debian/latest] + E --> F[Open Pull Request
to debian/qcom-next] F --> G[Pre-merge workflow runs] G --> H{Build succeeds?} H -->|No| I[Fix issues, push again] @@ -302,7 +302,7 @@ flowchart TD #### 1. Create Feature Branch ```bash -git checkout debian/latest +git checkout debian/qcom-next git pull git checkout -b fix/my-change ``` @@ -317,7 +317,7 @@ Edit source code, Debian files, etc. export DEBFULLNAME="Your Name" export DEBEMAIL="your.email@qualcomm.com" -gbp dch --debian-branch=debian/latest --new-version=1.0.1-1 --distribution=noble +gbp dch --debian-branch=debian/qcom-next --new-version=1.0.1-1 --distribution=noble ``` Or manually edit `debian/changelog`: @@ -342,7 +342,7 @@ git push origin fix/my-change #### 5. Create Pull Request ```bash -gh pr create --base debian/latest --title "Fix important bug" +gh pr create --base debian/qcom-next --title "Fix important bug" ``` #### 6. Wait for Pre-merge Checks @@ -512,7 +512,7 @@ flowchart TD subgraph "Development" A[Developer creates feature branch] --> B[Make changes] B --> C[Update changelog] - C --> D[Open PR to debian/latest] + C --> D[Open PR to debian/qcom-next] end subgraph "Pre-Merge (PR Validation)" @@ -614,7 +614,7 @@ override_dh_strip: **Solution**: ```bash git checkout --theirs debian/changelog -gbp dch --debian-branch=debian/latest --auto +gbp dch --debian-branch=debian/qcom-next --auto git add debian/changelog git commit ``` @@ -648,7 +648,7 @@ Follow Debian versioning: ### 5. 
Branch Management -- Keep `debian/latest` clean and working +- Keep `debian/qcom-next` clean and working - Delete PR branches after merging - Use descriptive branch names diff --git a/docs/reusable-workflows.md b/docs/reusable-workflows.md index 1f6f5df..64433c2 100644 --- a/docs/reusable-workflows.md +++ b/docs/reusable-workflows.md @@ -46,12 +46,12 @@ flowchart TD | Parameter | Type | Required | Default | Description | |-----------|------|----------|---------|-------------| | `qcom-build-utils-ref` | string | Yes | - | The ref (branch/tag) of qcom-build-utils to use | -| `debian-ref` | string | Yes | `debian/latest` | The debian branch/tag to build | +| `debian-ref` | string | Yes | `debian/qcom-next` | The debian branch/tag to build | | `distro-codename` | string | No | `noble` | Ubuntu distribution codename (noble, jammy, questing, etc.) | | `run-lintian` | boolean | No | `false` | Whether to run lintian during build | | `run-abi-checker` | boolean | No | `false` | Whether to check ABI compatibility | | `push-to-repo` | boolean | No | `false` | Whether to push built package to repository | -| `is-post-merge` | boolean | No | `false` | True if triggered by merge to debian/latest | +| `is-post-merge` | boolean | No | `false` | True if triggered by merge to debian/qcom-next | | `runner` | string | No | `lecore-prd-u2404-arm64-xlrg-od-ephem` | GitHub runner to use | ### Secrets @@ -100,7 +100,7 @@ jobs: uses: qualcomm-linux/qcom-build-utils/.github/workflows/qcom-build-pkg-reusable-workflow.yml@development with: qcom-build-utils-ref: development - debian-ref: debian/latest + debian-ref: debian/qcom-next push-to-repo: true run-abi-checker: true is-post-merge: true @@ -123,7 +123,7 @@ flowchart TD A[Workflow Called with upstream-tag] --> B[Normalize Tag Version
v1.0.0 → 1.0.0] B --> C[Checkout qcom-build-utils] C --> D[Checkout Package Repository] - D --> E[Checkout debian/latest and upstream/latest] + D --> E[Checkout debian/qcom-next and upstream/latest] E --> F{Tag already exists?} F -->|Yes| G[Fail: Tag already integrated] F -->|No| H[Add Upstream Repository as Remote] @@ -131,7 +131,7 @@ flowchart TD I --> J{upstream/latest exists?} J -->|No| K[Create upstream/latest from tag] J -->|Yes| L[Fast-forward merge to tag] - K --> M[Checkout debian/latest] + K --> M[Checkout debian/qcom-next] L --> M M --> N[Create debian/pr/version-1 branch] N --> O[Merge upstream tag into debian branch] @@ -216,7 +216,7 @@ flowchart TD C --> D[Checkout Upstream PR Branch] D --> E[Tag PR as upstream/pr] E --> F[Add Upstream as Remote to Package Repo] - F --> G[Checkout debian/latest] + F --> G[Checkout debian/qcom-next] G --> H[Create debian/upstream-pr branch] H --> I[Parse Current Version] I --> J[Import PR with gbp
Version: upstream~prNNN] @@ -257,7 +257,7 @@ flowchart TD 1. **Checkout Repositories**: Clone qcom-build-utils, package repo, and upstream PR 2. **Tag Upstream PR**: Create `upstream/pr` tag on the PR branch 3. **Add Remote**: Add upstream repo as remote to package repo -4. **Merge PR Changes**: Create test branch and merge PR into debian/latest +4. **Merge PR Changes**: Create test branch and merge PR into debian/qcom-next 5. **Version Manipulation**: Create special version with `~pr{number}` suffix 6. **Import with gbp**: Use git-buildpackage to import the PR 7. **Promote Changelog**: Update changelog for test build diff --git a/docs/workflow-architecture.md b/docs/workflow-architecture.md index 07084a2..e243b87 100644 --- a/docs/workflow-architecture.md +++ b/docs/workflow-architecture.md @@ -19,7 +19,7 @@ graph TB subgraph "Package Repository (pkg-*)" PR[Pull Request] - PM[Push to debian/latest] + PM[Push to debian/qcom-next] end subgraph "qcom-build-utils" @@ -159,7 +159,7 @@ pkg-mypackage/ **Branch Structure**: - `main` - Primary development branch -- `debian/latest` - Latest Debian packaging branch (build target) +- `debian/qcom-next` - Latest Debian packaging branch (build target) - `debian/` - Specific version branches - `upstream/latest` - Latest upstream source code (for non-native packages) - `upstream/` - Specific upstream version tags @@ -193,7 +193,7 @@ pkg-mypackage/ ### Pre-merge Flow (Pull Requests) -When a pull request is opened against `debian/latest` in a package repository: +When a pull request is opened against `debian/qcom-next` in a package repository: ```mermaid sequenceDiagram @@ -203,7 +203,7 @@ sequenceDiagram participant Build as build_package Action participant ABI as abi_checker Action - Dev->>PR: Opens PR to debian/latest + Dev->>PR: Opens PR to debian/qcom-next PR->>RW: Triggers pre-merge workflow RW->>Build: Build package for PR branch Build-->>RW: Build artifacts @@ -217,7 +217,7 @@ sequenceDiagram ```yaml on: 
pull_request_target: - branches: [ debian/latest ] + branches: [ debian/qcom-next ] jobs: build: @@ -232,7 +232,7 @@ jobs: ### Post-merge Flow (Merged PRs) -When a PR is merged to `debian/latest`: +When a PR is merged to `debian/qcom-next`: ```mermaid sequenceDiagram @@ -244,9 +244,9 @@ sequenceDiagram participant Push as push_to_repo Action participant Repo as pkg-oss-staging-repo - Dev->>PM: Merges PR to debian/latest + Dev->>PM: Merges PR to debian/qcom-next PM->>RW: Triggers post-merge workflow - RW->>Build: Build package from debian/latest + RW->>Build: Build package from debian/qcom-next Build-->>RW: Build artifacts RW->>ABI: Check ABI compatibility ABI-->>RW: ABI check results @@ -262,14 +262,14 @@ sequenceDiagram ```yaml on: push: - branches: [ debian/latest ] + branches: [ debian/qcom-next ] jobs: build: uses: qualcomm-linux/qcom-build-utils/.github/workflows/qcom-build-pkg-reusable-workflow.yml@development with: qcom-build-utils-ref: development - debian-ref: debian/latest + debian-ref: debian/qcom-next push-to-repo: true run-abi-checker: true is-post-merge: true @@ -314,7 +314,7 @@ sequenceDiagram Up->>RW: Triggers pkg-build-pr-check workflow RW->>Pkg: Clone packaging repo RW->>Up: Clone PR branch - RW->>Pkg: Merge upstream PR into debian/latest + RW->>Pkg: Merge upstream PR into debian/qcom-next RW->>Build: Build test package Build-->>RW: Build artifacts RW->>ABI: Check ABI compatibility diff --git a/scripts/create_data_tar.py b/scripts/create_data_tar.py deleted file mode 100644 index ab3034c..0000000 --- a/scripts/create_data_tar.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
-# -# SPDX-License-Identifier: BSD-3-Clause-Clear -""" -create_data_tar.py - -Standalone utility to: -- Parse a .changes file (provided via --changes-file, or auto-detected) -- Extract each referenced .deb into data/// -- Pack the data/ directory as .tar.gz in the same directory as the .changes file -""" - -import os -import sys -import argparse -import glob -import re -import tarfile -import subprocess -import traceback - -from color_logger import logger - - -def parse_arguments(): - parser = argparse.ArgumentParser( - description="Generate data.tar.gz by extracting deb contents to data/// from a .changes file." - ) - parser.add_argument( - "--changes-file", - required=False, - default="", - help="Path to the .changes file. If not provided, the newest .changes in --output-dir will be used." - ) - parser.add_argument( - "--output-dir", - required=False, - default=".", - help="Directory to search for the newest .changes when --changes-file is not provided. Also used as default working dir." - ) - parser.add_argument( - "--arch", - required=False, - default="arm64", - help="Architecture subfolder under each package directory (default: arm64)." - ) - return parser.parse_args() - - -def find_changes_file(changes_file: str, output_dir: str) -> str: - """ - Return the path to the .changes file to use. - Priority: - 1) If changes_file is provided and exists, use it. - 2) Else, find newest *.changes in output_dir. 
- """ - if changes_file: - if os.path.exists(changes_file): - return os.path.abspath(changes_file) - else: - raise FileNotFoundError(f"Specified --changes-file not found: {changes_file}") - - # Search for newest .changes in output_dir - candidates = glob.glob(os.path.join(output_dir or '.', '*.changes')) - if not candidates: - raise FileNotFoundError(f"No .changes files found in directory: {output_dir}") - - newest = max(candidates, key=lambda p: os.path.getmtime(p)) - return os.path.abspath(newest) - - -def collect_debs_from_changes(changes_path: str): - """ - Read the .changes file and collect referenced .deb filenames. - Returns a list of basenames (or relative names) as they appear in the changes file. - """ - try: - with open(changes_path, 'r', encoding='utf-8', errors='ignore') as f: - text = f.read() - except Exception as e: - raise RuntimeError(f"Failed to read .changes file {changes_path}: {e}") - - # Regex to capture *.deb tokens - debs = [fn for _, fn in re.findall(r'(^|\\s)([^\\s]+\\.deb)\\b', text)] - if not debs: - # Fallback: simple tokenization - for line in text.splitlines(): - if '.deb' in line: - for tok in line.split(): - if tok.endswith('.deb'): - debs.append(tok) - - # De-duplicate, keep order - uniq = list(dict.fromkeys(debs)) - if not uniq: - raise RuntimeError(f"No .deb files referenced in .changes file: {changes_path}") - return uniq - - -def extract_debs_to_data(deb_names, work_dir, arch) -> bool: - """ - For each deb in deb_names (relative to work_dir), extract with dpkg-deb -x - into work_dir/data/// - Returns True if at least one deb was extracted successfully. 
- """ - data_root = os.path.join(work_dir, 'data') - os.makedirs(data_root, exist_ok=True) - - extracted_any = False - for deb_name in deb_names: - deb_path = deb_name if os.path.isabs(deb_name) else os.path.join(work_dir, deb_name) - if not os.path.exists(deb_path): - logger.warning(f"Referenced .deb not found: {deb_path} (skipping)") - continue - - base = os.path.basename(deb_path) - # Expected: __.deb, fall back to stem if no underscores - pkg = base.split('_')[0] if '_' in base else os.path.splitext(base)[0] - dest_dir = os.path.join(data_root, pkg, arch) - os.makedirs(dest_dir, exist_ok=True) - - logger.debug(f"Extracting {deb_path} -> {dest_dir}") - try: - subprocess.run(['dpkg-deb', '-x', deb_path, dest_dir], check=True) - extracted_any = True - except FileNotFoundError: - logger.error("dpkg-deb not found on host. Install dpkg tools to enable extraction.") - return False - except subprocess.CalledProcessError as e: - logger.error(f"dpkg-deb failed extracting {deb_path}: {e}") - - if not extracted_any: - logger.error("No .deb files were successfully extracted.") - return False - return True - - -def create_tar_of_data(work_dir: str, tar_name: str) -> str: - """ - Create work_dir/ containing the data/ directory. - Returns the path to the tarball on success. 
- """ - data_root = os.path.join(work_dir, 'data') - if not os.path.isdir(data_root): - raise RuntimeError(f"Missing data directory to archive: {data_root}") - - tar_path = os.path.join(work_dir, tar_name) - logger.debug(f"Creating tarball: {tar_path}") - with tarfile.open(tar_path, 'w:gz') as tar: - tar.add(data_root, arcname='data') - return tar_path - - -def main(): - args = parse_arguments() - - # Determine the .changes file - try: - changes_path = find_changes_file(args.changes_file, args.output_dir) - except Exception as e: - logger.critical(str(e)) - sys.exit(1) - - # The working directory is where the .changes was generated (and where the debs are expected) - work_dir = os.path.dirname(changes_path) - logger.debug(f"Using .changes file: {changes_path}") - logger.debug(f"Working directory: {work_dir}") - - # Collect debs from the changes file - try: - deb_names = collect_debs_from_changes(changes_path) - except Exception as e: - logger.critical(str(e)) - sys.exit(1) - - # Extract each deb into data/// - ok = extract_debs_to_data(deb_names, work_dir, args.arch) - if not ok: - sys.exit(1) - - # Create tarball named after the .changes file (e.g., pkg_1.0_arm64.tar.gz) - try: - base = os.path.basename(changes_path) - tar_name = re.sub(r'\.changes$', '.tar.gz', base) - if tar_name == base: - tar_name = base + '.tar.gz' - tar_path = create_tar_of_data(work_dir, tar_name) - logger.info(f"Created tarball: {tar_path}") - except Exception as e: - logger.critical(f"Failed to create tarball: {e}") - sys.exit(1) - - sys.exit(0) - - -if __name__ == "__main__": - try: - main() - except Exception as e: - logger.critical(f"Uncaught exception: {e}") - traceback.print_exc() - sys.exit(1) diff --git a/scripts/create_promotion_pr.py b/scripts/create_promotion_pr.py index 5f6a6d5..96fa38b 100755 --- a/scripts/create_promotion_pr.py +++ b/scripts/create_promotion_pr.py @@ -60,7 +60,7 @@ def create_pr_body(base_branch: str, upstream_tag: str, normalized_version: str) - To its left, 
the 'upstream/latest' branch lives is this repo, and represents a copy of the upstream repo (and it has already happened during the promotion workflow run). The commit tagged 'upstream/{normalized_version}' is a merge from the upstream tag {upstream_tag} commit where in addition, special git wizardry happened to perform a special filtering of any potential upstream .github/ and debian/ folders have been filtered out, - and only homonym folders from the debian/latest branch have been kept. + and only homonym folders from the debian/qcom-next branch have been kept. - To its left, this 'debian/pr/{normalized_version}-1' branch was created during the promotion workflow and is the head branch of this PR. It represents the merge of the upstream/latest branch into {base_branch}. - Note that an extra commit for updating the debian/changelog file to reflect the new version {normalized_version}-1 has been added on top of that merge. diff --git a/scripts/docker_deb_build.py b/scripts/docker_deb_build.py deleted file mode 100755 index d7d2cb9..0000000 --- a/scripts/docker_deb_build.py +++ /dev/null @@ -1,359 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# -# SPDX-License-Identifier: BSD-3-Clause-Clear - -""" -docker_deb_build.py - -Helper script to build a debian package using the container from the Dockerfile in the docker/ folder. 
-""" - -import os -import sys -import argparse -import subprocess -import traceback -import platform -import shutil -import urllib.request -import glob -import grp -import pwd -import getpass - -from color_logger import logger - -def parse_arguments(): - parser = argparse.ArgumentParser(description="Build a debian package inside a docker container.") - - parser.add_argument("--source-dir", - required=False, - default=".", - help="Path to the source directory containing the debian package source.") - - parser.add_argument("--output-dir", - required=False, - default="..", - help="Path to the output directory for the built package.") - - parser.add_argument("--distro", - type=str, - choices=['noble', 'questing'], - default='noble', - help="The target distribution for the package build.") - - parser.add_argument("--run-lintian", - action='store_true', - help="Run lintian on the package.") - - parser.add_argument("--extra-repo", - type=str, - action='append', - default=[], - help="Additional APT repository to include. Can be specified multiple times. Example: 'deb [arch=arm64 trusted=yes] http://pkg.qualcomm.com noble/stable main'") - - parser.add_argument("--rebuild", - action='store_true', - help="Rebuild the package if it already exists.") - - args = parser.parse_args() - - return args - -def check_docker_dependencies(timeout=20): - """ - Verify docker CLI presence, daemon accessibility, and user permission to talk to the daemon. - """ - - # 1) docker binary present - if shutil.which("docker") is None: - raise Exception("docker CLI not found. 
Install Docker: https://docs.docker.com/get-docker/") - - # 2) try contacting the daemon - try: - p = subprocess.run(["docker", "info"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, - check=True, timeout=timeout) - logger.info("Docker CLI and daemon reachable.") - return True - - except subprocess.CalledProcessError as e: - err = (e.stderr or b"").decode(errors="ignore") + (e.stdout or b"").decode(errors="ignore") - err_l = err.lower() - sock = "/var/run/docker.sock" - - # permission issue -> check group on the socket - if "permission denied" in err_l or "access denied" in err_l or "cannot connect to the docker daemon" in err_l: - if os.path.exists(sock): - st = os.stat(sock) - try: - sock_group = grp.getgrgid(st.st_gid).gr_name - except KeyError: - sock_group = f"gid:{st.st_gid}" - user = getpass.getuser() - # gather groups for user - user_groups = [g.gr_name for g in grp.getgrall() if user in g.gr_mem] - primary_gid = pwd.getpwnam(user).pw_gid - try: - primary_group = grp.getgrgid(primary_gid).gr_name - user_groups.append(primary_group) - except KeyError: - pass - - if sock_group not in user_groups: - raise Exception( - f"Permission denied accessing Docker socket ({sock}). Current user '{user}' is not in the socket group '{sock_group}'.\n" - f"Add the user to the group: \"sudo usermod -aG {sock_group} $USER\" (then re-login) or run the script with sudo.\n" - f"Also, to avoid having to do a complete logout/login, you can run: \"newgrp {sock_group}\" which will start a new shell with the new group applied." - ) - else: - # user is in group but still cannot connect -> daemon likely stopped - raise Exception( - "Docker socket exists and group membership OK, but 'docker info' failed. Is the Docker daemon running?\n" - "Try: sudo systemctl start docker (or check your platform's docker service)." - ) - else: - raise Exception( - "Cannot contact Docker daemon and /var/run/docker.sock does not exist. 
Is the Docker engine installed and running?\n" - "Try: sudo systemctl start docker" - ) - else: - # generic failure - raise Exception(f"Failed to contact Docker daemon: {err.strip() or e}") - - except subprocess.TimeoutExpired: - raise Exception("Timed out while trying to contact the Docker daemon. Is it running?") - -def build_docker_image(image, arch, distro): - this_script_dir = os.path.dirname(os.path.abspath(__file__)) - docker_dir = os.path.normpath(os.path.join(this_script_dir, '..', 'docker')) - context_dir = docker_dir - dockerfile_name = f"Dockerfile.{arch}.{distro}" - dockerfile_path = os.path.join(docker_dir, dockerfile_name) - - logger.debug(f"Building docker image '{image}' for arch '{arch}' from Dockerfile: {dockerfile_path}") - - if not os.path.exists(dockerfile_path): - logger.error(f"No local Dockerfile found for arch '{arch}' at expected path: {dockerfile_path}. Cannot build image '{image}'.") - return False - - build_cmd = ["docker", "build", "-t", image, "-f", dockerfile_path, context_dir] - - logger.debug(f"Running: {' '.join(build_cmd)}") - - # Stream build output live so the user sees progress - try: - proc = subprocess.Popen(build_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, text=True) - try: - for line in proc.stdout: - # print to terminal immediately - sys.stdout.write(line) - sys.stdout.flush() - # also log the line - #logger.debug(line.rstrip()) - - rc = proc.wait() - - except KeyboardInterrupt: - proc.terminate() - proc.wait() - raise - - if rc != 0: - raise Exception(f"Failed to build docker image from {dockerfile_path} (exit {rc}).") - - logger.info(f"Successfully built image '{image}'.") - return True - except subprocess.TimeoutExpired: - proc.kill() - raise Exception(f"Timed out while building docker image from {dockerfile_path}.") - -def rebuild_docker_image(image_base, arch, distro): - """ - Force rebuild of the given docker image from local Dockerfile. 
- """ - image = f"{image_base}{distro}" - - logger.debug(f"Rebuilding docker image '{image}' from local Dockerfile...") - - # Delete/purge the current image if it exists - - try: - subprocess.run(["docker", "image", "rm", "-f", image], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) - logger.info(f"Deleted existing image '{image}'.") - except subprocess.CalledProcessError: - logger.debug(f"No existing image '{image}' to delete.") - - # Build the image - build_docker_image(image, arch, distro) - -def check_docker_image(image_base, arch, distro): - """ - Ensure the given docker image is available locally. If not, look for a local Dockerfile - in ../docker named `Dockerfile.{arch}`. - Raises an Exception with actionable guidance on failure. - """ - - image = f"{image_base}{distro}" - - logger.debug(f"Checking for docker image: {image}") - - # 1) check if image exists locally - try: - subprocess.run(["docker", "image", "inspect", image], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, - check=True, timeout=10) - logger.info(f"Docker image '{image}' is present locally.") - return True - except subprocess.CalledProcessError: - logger.warning(f"Docker image '{image}' not found locally.") - except subprocess.TimeoutExpired: - raise Exception("Timed out while checking local docker images.") - - # Since the image is not present locally, try to build it from local Dockerfile - build_docker_image(image, arch, distro) - -def build_package_in_docker(image_base, source_dir, output_dir, build_arch, distro, run_lintian: bool, extra_repo: list[str]) -> bool: - """ - Build the debian package inside the given docker image. - source_dir: path to the debian package source (mounted into the container) - output_dir: path to the output directory for the built package (mounted into the container) - build_arch: architecture string for the build (e.g. 'arm64') - distro: target distribution string (e.g. 
'noble') - run_lintian: whether to run lintian on the built package - extra_repo: list of additional APT repositories to include - Returns True on success, False on failure. - """ - - image = f"{image_base}{distro}" - - # Register the name of the newest build log in the output_dir in case there are leftovers from a previous build - # So that we can identify if this run produced a newer build log. Sbuild produces .build files with timestamps, - # and one of them is a symlink to the latest build log. - build_log_files = glob.glob(os.path.join(output_dir or '.', '*.build')) - prev_build_log = next((os.readlink(p) for p in build_log_files if os.path.islink(p)), None) - logger.debug(f"Previous build log: {prev_build_log}") - - # Build the gbp command - # The --git-builder value is a single string passed to gbp - extra_repo_option = " ".join(f"--extra-repository='{repo}'" for repo in extra_repo) if extra_repo else "" - lintian_option = '--no-run-lintian' if not run_lintian else "" - sbuild_cmd = f"sbuild --build-dir=/workspace/output --host=arm64 --build={build_arch} --dist={distro} {lintian_option} {extra_repo_option}" - - # Ensure git inside the container treats the mounted checkout as safe - git_safe_cmd = "git config --global --add safe.directory /workspace/src" - gbp_cmd = f"{git_safe_cmd} && gbp buildpackage --git-ignore-branch --git-builder=\"{sbuild_cmd}\"" - - # Decide which build command to run based on debian/source/format in the source tree. - # Prefer 'native' -> run sbuild directly. If the source format uses 'quilt', use gbp. - format_file = os.path.join(source_dir, 'debian', 'source', 'format') - if not os.path.exists(format_file): - raise Exception(f"Missing {format_file}: cannot determine source format (native/quilt). 
Is the source dir correctly pointing to a debian package source tree?") - - try: - with open(format_file, 'r', errors='ignore') as f: - fmt = f.read().lower() - except Exception as e: - raise Exception(f"Failed to read {format_file}: {e}") - - if 'native' in fmt: - build_cmd = sbuild_cmd - elif 'quilt' in fmt: - build_cmd = gbp_cmd - else: - raise Exception(f"Unsupported debian/source/format in {format_file}. Expected to contain 'native' or 'quilt', got: {fmt!r}") - - docker_cmd = [ - 'docker', 'run', '--rm', '--privileged', "-t", - '-v', f"{source_dir}:/workspace/src:Z", - '-v', f"{output_dir}:/workspace/output:Z", - '-w', '/workspace/src', - image, 'bash', '-c', build_cmd - ] - - logger.debug(f"Running build inside container: {' '.join(docker_cmd[:])}") - - try: - # Run and stream output live - res = subprocess.run(docker_cmd, check=False) - except KeyboardInterrupt: - raise - - if res.returncode == 0: - logger.info("✅ Successfully built package") - else: - logger.error("❌ Build failed") - - - build_log_files = glob.glob(os.path.join(output_dir or '.', '*.build')) - new_build_log = next((os.readlink(p) for p in build_log_files if os.path.islink(p)), None) - - if new_build_log == prev_build_log: - logger.debug("ℹ️ No new sbuild log produced during this run.") - else: - logger.debug(f"ℹ️ New sbuild log available at: {os.path.join(output_dir, new_build_log)}") - - return res.returncode == 0 - -def main(): - args = parse_arguments() - - logger.debug(f"Print of the arguments: {args}") - - # In sbuild terms, the build architecture is the architecture of the machine doing the build, - # aka the architecture of the machine running this script. 
- build_arch = platform.machine() - - logger.debug(f"The builder arch is {build_arch}") - - # Normalize the arch string for use later - if build_arch == "x86_64": - build_arch = "amd64" - logger.debug("The build will be a cross-compilation amd64 -> arm64") - elif build_arch == "aarch64": - build_arch = "arm64" - logger.debug("The build will be a native build arm64 -> arm64") - else: - raise Exception("Invalid base arch") - - # Verify Docker is available and the current user can talk to the daemon - check_docker_dependencies() - - image_base = f"ghcr.io/qualcomm-linux/pkg-builder:{build_arch}-" - - # If --rebuild is specified, force rebuild of the docker image and exit - if args.rebuild: - rebuild_docker_image(image_base, build_arch, 'noble') - rebuild_docker_image(image_base, build_arch, 'questing') - sys.exit(0) - - # Make sure source and output dirs are absolute paths - if not os.path.isabs(args.source_dir): - args.source_dir = os.path.abspath(args.source_dir) - if not os.path.isabs(args.output_dir): - args.output_dir = os.path.abspath(args.output_dir) - - logger.debug(f"The source dir is {args.source_dir}") - logger.debug(f"The output dir is {args.output_dir}") - - # Ensure the docker image is available, building it from local Dockerfile if needed - check_docker_image(image_base, build_arch, args.distro) - - ret = build_package_in_docker(image_base, args.source_dir, args.output_dir, build_arch, args.distro, args.run_lintian, args.extra_repo) - - if ret: - sys.exit(0) - else: - sys.exit(1) - -if __name__ == "__main__": - - try: - main() - - except Exception as e: - logger.critical(f"Uncaught exception : {e}") - - traceback.print_exc() - - sys.exit(1) \ No newline at end of file