diff --git a/.github/workflows/pr-commit-processing.yml b/.github/workflows/pr-commit-processing.yml
new file mode 100644
index 0000000000000..de86c41249c8b
--- /dev/null
+++ b/.github/workflows/pr-commit-processing.yml
@@ -0,0 +1,167 @@
+name: PR Commit Processing
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+
+permissions:
+  contents: read
+  pull-requests: write
+
+jobs:
+  commit-validation:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout kernel-src-tree
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: ${{ github.head_ref }}
+
+      - name: Fetch base branch
+        run: |
+          git fetch origin ${{ github.base_ref }}:${{ github.base_ref }}
+
+      - name: Checkout kernel-src-tree-tools
+        uses: actions/checkout@v4
+        with:
+          repository: ctrliq/kernel-src-tree-tools
+          ref: '{jmaple}_pr_jira_test'
+          path: kernel-src-tree-tools
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install jira
+
+      # ============================================================
+      # Step 1: Upstream Commit Check
+      # ============================================================
+
+      - name: Download check_kernel_commits.py
+        run: |
+          curl -sL \
+            https://raw.githubusercontent.com/ctrliq/kernel-src-tree-tools/mainline/check_kernel_commits.py \
+            -o check_kernel_commits.py
+          chmod +x check_kernel_commits.py
+
+      - name: Run upstream fixes check
+        id: checkkernel
+        run: |
+          python3 check_kernel_commits.py --repo . --pr_branch "${{ github.head_ref }}" --base_branch "${{ github.base_ref }}" --markdown | tee result.txt
+          # Save non-empty results for PR comment
+          if grep -q -v "All referenced commits exist upstream and have no Fixes: tags." result.txt; then
+            echo "has_findings=true" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Comment on PR if upstream issues found
+        if: steps.checkkernel.outputs.has_findings == 'true'
+        env:
+          GH_TOKEN: ${{ github.token }}
+        run: |
+          gh pr comment ${{ github.event.pull_request.number }} \
+            --body "$(cat result.txt)" \
+            --repo ${{ github.repository }}
+
+      # ============================================================
+      # Step 2: JIRA PR Check
+      # ============================================================
+
+      - name: Mask JIRA credentials
+        run: |
+          echo "::add-mask::${{ secrets.JIRA_API_USER }}"
+          echo "::add-mask::${{ secrets.JIRA_API_TOKEN }}"
+
+      - name: Run JIRA PR Check
+        id: jira_check
+        continue-on-error: true
+        env:
+          JIRA_URL: ${{ secrets.JIRA_URL }}
+          JIRA_API_USER: ${{ secrets.JIRA_API_USER }}
+          JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
+        run: |
+          cd kernel-src-tree-tools
+
+          # Run script and capture output, ensuring credentials are never echoed
+          set +x # Disable command echo to prevent credential exposure
+          set +e # Don't exit on error, we want to capture the output
+          OUTPUT=$(python3 jira_pr_check.py \
+            --jira-url "${JIRA_URL}" \
+            --jira-user "${JIRA_API_USER}" \
+            --jira-key "${JIRA_API_TOKEN}" \
+            --kernel-src-tree .. \
+            --merge-target ${{ github.base_ref }} \
+            --pr-branch ${{ github.head_ref }} 2>&1)
+          EXIT_CODE=$?
+
+          # Filter out any potential credential leaks from output
+          FILTERED_OUTPUT=$(echo "$OUTPUT" | grep -v "jira-user\|jira-key\|basic_auth\|Authorization" || true)
+
+          echo "$FILTERED_OUTPUT"
+          echo "output<<EOF" >> $GITHUB_OUTPUT
+          echo "$FILTERED_OUTPUT" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+
+          # Check if there are any issues based on output patterns
+          if echo "$FILTERED_OUTPUT" | grep -q "❌ Errors:"; then
+            echo "has_issues=true" >> $GITHUB_OUTPUT
+
+            # Check specifically for LTS mismatch errors
+            if echo "$FILTERED_OUTPUT" | grep -q "expects branch"; then
+              echo "has_lts_mismatch=true" >> $GITHUB_OUTPUT
+            else
+              echo "has_lts_mismatch=false" >> $GITHUB_OUTPUT
+            fi
+          elif echo "$FILTERED_OUTPUT" | grep -q "⚠️ Warnings:"; then
+            echo "has_issues=true" >> $GITHUB_OUTPUT
+            echo "has_lts_mismatch=false" >> $GITHUB_OUTPUT
+          else
+            echo "has_issues=false" >> $GITHUB_OUTPUT
+            echo "has_lts_mismatch=false" >> $GITHUB_OUTPUT
+          fi
+
+          # Exit with the script's exit code
+          exit $EXIT_CODE
+
+      - name: Comment PR with JIRA issues
+        if: steps.jira_check.outputs.has_issues == 'true'
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const output = process.env.CHECK_OUTPUT;
+
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: output
+            });
+        env:
+          CHECK_OUTPUT: ${{ steps.jira_check.outputs.output }}
+
+      - name: Request changes if LTS mismatch
+        if: steps.jira_check.outputs.has_lts_mismatch == 'true'
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            github.rest.pulls.createReview({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              pull_number: context.issue.number,
+              event: 'REQUEST_CHANGES',
+              body: '⚠️ This PR contains VULN tickets that do not match the target LTS product. Please review the JIRA ticket assignments and ensure they match the merge target branch.'
+            });
+
+      - name: Fail workflow if JIRA errors found
+        if: steps.jira_check.outcome == 'failure'
+        run: |
+          echo "❌ JIRA PR check failed - errors were found in one or more commits"
+          exit 1
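Note on the JIRA check step above: its multi-line report is handed to the later comment steps through $GITHUB_OUTPUT using GitHub Actions' heredoc-style delimiter syntax ("name<<DELIMITER" ... "DELIMITER") and is read back as steps.jira_check.outputs.output. A minimal standalone sketch of that pattern follows; the workflow name, step ids, and output name here are illustrative only and not part of this PR:

name: Multiline output sketch

on: workflow_dispatch

jobs:
  demo:
    runs-on: ubuntu-latest
    steps:
      - name: Produce a multi-line output
        id: report
        run: |
          # Multi-line values must use the <<DELIMITER form;
          # a plain `echo "name=value"` only carries a single line.
          BODY=$(printf 'first line\nsecond line')
          echo "body<<EOF" >> "$GITHUB_OUTPUT"
          echo "$BODY" >> "$GITHUB_OUTPUT"
          echo "EOF" >> "$GITHUB_OUTPUT"

      - name: Read it back in a later step
        run: |
          echo "${{ steps.report.outputs.body }}"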
diff --git a/.github/workflows/upstream-commit-check.yml b/.github/workflows/upstream-commit-check.yml
deleted file mode 100644
index e95c4e904f8e4..0000000000000
--- a/.github/workflows/upstream-commit-check.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-name: Check Kernel Commits for Upstream Fixes
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened]
-
-permissions:
-  contents: read
-  pull-requests: write
-
-jobs:
-  check-upstream-fixes:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout PR branch
-        uses: actions/checkout@v4
-        with:
-          repository: ${{ github.event.pull_request.head.repo.full_name }}
-          fetch-depth: 0
-          ref: ${{ github.head_ref }}
-
-      - name: Checkout base branch
-        run: |
-          git remote add base_repo https://github.com/${{ github.repository }}.git
-          git fetch base_repo ${{ github.base_ref }}:${{ github.base_ref }}
-
-      - name: Download check_kernel_commits.py
-        run: |
-          curl -sL \
-            https://raw.githubusercontent.com/ctrliq/kernel-src-tree-tools/mainline/check_kernel_commits.py \
-            -o check_kernel_commits.py
-          chmod +x check_kernel_commits.py
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: '3.x'
-
-      - name: Run upstream fixes check
-        id: checkkernel
-        run: |
-          python3 check_kernel_commits.py --repo . --pr_branch "${{ github.head_ref }}" --base_branch "${{ github.base_ref }}" --markdown | tee result.txt
-          # Save non-empty results for PR comment
-          if grep -q -v "All referenced commits exist upstream and have no Fixes: tags." result.txt; then
-            echo "has_findings=true" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Comment on PR if issues found
-        if: steps.checkkernel.outputs.has_findings == 'true'
-        env:
-          GH_TOKEN: ${{ github.token }}
-        run: |
-          gh pr comment ${{ github.event.pull_request.number }} \
-            --body "$(cat result.txt)" \
-            --repo ${{ github.repository }}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 33aa34a0bff34..98f5634917702 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3431,8 +3431,6 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	BUG_ON(!(gif_set(svm)));
-
 	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
 	++vcpu->stat.irq_injections;
 
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index e80959ef32dce..1ee6f6cd285ca 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -30,7 +30,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 	struct aead_request *subreq = aead_request_ctx(req);
 	struct crypto_aead *geniv;
 
-	if (err == -EINPROGRESS)
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 	if (err)
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index fd67dc0b13a51..ceeeb0f7bd2da 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
 {
 	struct nfs_fattr *fattr = NULL;
 	struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
-	size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+	size_t fh_size = offsetof(struct nfs_fh, data);
 	const struct nfs_rpc_ops *rpc_ops;
 	struct dentry *dentry;
 	struct inode *inode;
-	int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+	int len = EMBED_FH_OFF;
 	u32 *p = fid->raw;
 	int ret;
 
+	/* Initial check of bounds */
+	if (fh_len < len + XDR_QUADLEN(fh_size) ||
+	    fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
+		return NULL;
+	/* Calculate embedded filehandle size */
+	fh_size += server_fh->size;
+	len += XDR_QUADLEN(fh_size);
 	/* NULL translates to ESTALE */
 	if (fh_len < len || fh_type != len)
 		return NULL;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index c77f80c58086b..8ad66fc28941c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6336,9 +6336,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
 	if (!chan)
 		goto done;
 
+	chan = l2cap_chan_hold_unless_zero(chan);
+	if (!chan)
+		goto done;
+
 	l2cap_chan_lock(chan);
 	l2cap_chan_del(chan, ECONNREFUSED);
 	l2cap_chan_unlock(chan);
+	l2cap_chan_put(chan);
 
 done:
 	mutex_unlock(&conn->chan_lock);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c0883fa3d3bda..593757852564b 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -964,6 +964,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (cl != NULL) {
 		int old_flags;
+		int len = 0;
 
 		if (parentid) {
 			if (cl->cl_parent &&
@@ -994,9 +995,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		if (usc != NULL)
 			hfsc_change_usc(cl, usc, cur_time);
 
+		if (cl->qdisc->q.qlen != 0)
+			len = qdisc_peek_len(cl->qdisc);
+		/* Check queue length again since some qdisc implementations
+		 * (e.g., netem/codel) might empty the queue during the peek
+		 * operation.
+		 */
 		if (cl->qdisc->q.qlen != 0) {
-			int len = qdisc_peek_len(cl->qdisc);
-
 			if (cl->cl_flags & HFSC_RSC) {
 				if (old_flags & HFSC_RSC)
 					update_ed(cl, len);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 892028ce5891a..1aa32f5cd0f8c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -129,7 +129,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * it's better to just linearize it otherwise crc computing
 	 * takes longer.
 	 */
-	if ((!is_gso && skb_linearize(skb)) ||
+	if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
 	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
 		goto discard_it;
 
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 748a3c40966e9..c19164089ae4e 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4399,7 +4399,7 @@ static int add_tuning_control(struct hda_codec *codec,
 	}
 	knew.private_value =
 		HDA_COMPOSE_AMP_VAL(nid, 1, 0, type);
-	sprintf(namestr, "%s %s Volume", name, dirstr[dir]);
+	snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]);
 	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
 }
 
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index e9227751cf7ea..de08132c745c7 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -336,20 +336,28 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
 
 	len = le16_to_cpu(cluster->wLength);
 	c = 0;
-	p += sizeof(struct uac3_cluster_header_descriptor);
+	p += sizeof(*cluster);
+	len -= sizeof(*cluster);
 
-	while (((p - (void *)cluster) < len) && (c < channels)) {
+	while (len > 0 && (c < channels)) {
 		struct uac3_cluster_segment_descriptor *cs_desc = p;
 		u16 cs_len;
 		u8 cs_type;
 
+		if (len < sizeof(*cs_desc))
+			break;
 		cs_len = le16_to_cpu(cs_desc->wLength);
+		if (len < cs_len)
+			break;
 		cs_type = cs_desc->bSegmentType;
 
 		if (cs_type == UAC3_CHANNEL_INFORMATION) {
 			struct uac3_cluster_information_segment_descriptor *is = p;
 			unsigned char map;
 
+			if (cs_len < sizeof(*is))
+				break;
+
 			/*
 			 * TODO: this conversion is not complete, update it
 			 * after adding UAC3 values to asound.h
@@ -451,6 +459,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
 			chmap->map[c++] = map;
 		}
 		p += cs_len;
+		len -= cs_len;
 	}
 
 	if (channels < c)
@@ -871,7 +880,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 	u64 badd_formats = 0;
 	unsigned int num_channels;
 	struct audioformat *fp;
-	u16 cluster_id, wLength;
+	u16 cluster_id, wLength, cluster_wLength;
 	int clock = 0;
 	int err;
 
@@ -998,6 +1007,16 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 		return ERR_PTR(-EIO);
 	}
 
+	cluster_wLength = le16_to_cpu(cluster->wLength);
+	if (cluster_wLength < sizeof(*cluster) ||
+	    cluster_wLength > wLength) {
+		dev_err(&dev->dev,
+			"%u:%d : invalid Cluster Descriptor size\n",
+			iface_no, altno);
+		kfree(cluster);
+		return ERR_PTR(-EIO);
+	}
+
 	num_channels = cluster->bNrChannels;
 	chmap = convert_chmap_v3(cluster);
 	kfree(cluster);