From ec429ea76c845c521375a2f96ac8e183ac296660 Mon Sep 17 00:00:00 2001
From: Michiel Derhaeg
Date: Thu, 2 Oct 2025 15:39:29 +0200
Subject: [PATCH 1/3] arcv: Correct mode and parameters for XARCV instructions

- Widening/narrowing instructions now use the correct mode, so the
  correct vsetvli parameters are emitted.
- All of the assembly templates now refer to the correct operands.
- The tests have been updated to be more precise and to verify that
  the correct vsetvli parameters are emitted.

Signed-off-by: Michiel Derhaeg
---
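Notes (not part of the commit message): a minimal sketch of the
behaviour the updated tests pin down.  The intrinsic and types below
are taken from the tests in this patch; the function name, variable
names, and register numbers are made up for illustration.  For a
quad-widening instruction such as arcv.vqmxm4.vv, the vsetvli must be
derived from the narrow e8 source mode, not from the widened i32m4
destination mode:

    #include <stddef.h>
    #include <riscv_vector.h>

    vint32m4_t
    qmxm4_sketch (vint32m4_t acc, vint8m1_t a, vint8m1_t b, size_t vl)
    {
      /* Expected codegen after this patch, per the updated tests
         (register numbers illustrative):
             vsetvli  zero,a4,e8,m1,ta,ma   <- SEW/LMUL from the i8 sources
             arcv.vqmxm4.vv  v8,v12,v13     <- template now prints %3,%4  */
      return __riscv_arcv_vqmxm4_vv_i32m4 (acc, a, b, vl);
    }

Before this change the mode attribute on these patterns named the wide
destination mode, so the wrong SEW/LMUL could be selected for the
narrow source operands.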
 gcc/config/riscv/arcv-vector.md               | 210 +++++++++---------
 .../riscv/arcv-mxmb-vqmxm4_vv-compile-1.c     |  30 ++-
 .../riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c   |  30 ++-
 .../riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c    |  30 ++-
 .../riscv/arcv-mxmc-vqmxm8_vv-compile-1.c     |  30 ++-
 .../riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c   |  30 ++-
 .../riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c    |  30 ++-
 .../riscv/arcv-mxmd-vqmxm16_vv-compile-1.c    |  30 ++-
 .../riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c  |  30 ++-
 .../riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c   |  30 ++-
 .../riscv/arcv-vcplx-vcmuli_v-compile-1.c     |  56 ++++-
 .../riscv/arcv-vcplx-vcmulni_v-compile-1.c    |  56 ++++-
 .../riscv/arcv-vcplx-vconj_v-compile-1.c      |  56 ++++-
 .../riscv/arcv-vcplx-veven_v-compile-1.c      |  56 ++++-
 .../arcv-vcplx-vinterleave_vv-compile-1.c     |  56 ++++-
 .../riscv/arcv-vcplx-vodd_v-compile-1.c       |  56 ++++-
 .../riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c  |  30 ++-
 .../riscv/arcv-vcplx-vqcrdot_vv-compile-1.c   |  30 ++-
 .../riscv/arcv-vcplx-vscredsum_vv-compile-1.c |  84 +++++--
 .../riscv/arcv-vcplx-vwcredsum_vv-compile-1.c |  56 ++++-
 .../riscv/arcv-vcplx-vwscjmac_vv-compile-1.c  |  59 ++++-
 .../riscv/arcv-vcplx-vwscjmac_vx-compile-1.c  |  59 ++++-
 .../riscv/arcv-vcplx-vwscjmul_vv-compile-1.c  |  59 ++++-
 .../riscv/arcv-vcplx-vwscjmul_vx-compile-1.c  |  59 ++++-
 .../arcv-vcplx-vwscjnmsac_vv-compile-1.c      |  59 ++++-
 .../arcv-vcplx-vwscjnmsac_vx-compile-1.c      |  59 ++++-
 .../riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c |  56 ++++-
 .../riscv/arcv-vcplx-vwscmac_vv-compile-1.c   |  59 ++++-
 .../riscv/arcv-vcplx-vwscmac_vx-compile-1.c   |  59 ++++-
 .../riscv/arcv-vcplx-vwscmul_vv-compile-1.c   |  59 ++++-
 .../riscv/arcv-vcplx-vwscmul_vx-compile-1.c   |  59 ++++-
 .../riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c |  59 ++++-
 .../riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c |  59 ++++-
 .../riscv/arcv-vcplx-vwscrdot_vv-compile-1.c  |  56 ++++-
 .../riscv/arcv-vdsp-vaddsub_vv-compile-1.c    |  84 +++++--
 .../riscv/arcv-vdsp-vclr_v_i-compile-1.c      | 110 +++++++--
 .../riscv/arcv-vdsp-vmv_s_v-compile-1.c       |  90 ++++++--
 .../riscv/arcv-vdsp-vmv_v_s-compile-1.c       |  52 ++++-
 .../riscv/arcv-vdsp-vmvi_s_v-compile-1.c      |  90 ++++++--
 .../riscv/arcv-vdsp-vmvi_v_s-compile-1.c      |  52 ++++-
 .../riscv/arcv-vdsp-vnorm_v-compile-1.c       |  84 +++++--
 .../riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c   |   8 +-
 .../riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c   |   8 +-
 .../riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c   |   9 +-
 .../riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c   |  13 +-
 .../riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c   |  12 +-
 .../riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c   |  12 +-
 .../riscv/arcv-vdsp-vnsra_qi-compile-1.c      |   8 +-
 .../riscv/arcv-vdsp-vnsra_qv-compile-1.c      |   8 +-
 .../riscv/arcv-vdsp-vnsra_qx-compile-1.c      |   8 +-
 .../riscv/arcv-vdsp-vnsra_s_qi-compile-1.c    |   8 +-
 .../riscv/arcv-vdsp-vnsra_s_qv-compile-1.c    |   8 +-
 .../riscv/arcv-vdsp-vnsra_s_qx-compile-1.c    |   8 +-
 .../riscv/arcv-vdsp-vnsra_s_wi-compile-1.c    |  12 +-
 .../riscv/arcv-vdsp-vnsra_s_wv-compile-1.c    |  12 +-
 .../riscv/arcv-vdsp-vnsra_s_wx-compile-1.c    |  12 +-
 .../riscv/arcv-vdsp-vnsra_wi-compile-1.c      |  12 +-
 .../riscv/arcv-vdsp-vnsra_wv-compile-1.c      |  12 +-
 .../riscv/arcv-vdsp-vnsra_wx-compile-1.c      |  12 +-
 .../riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c  |  56 ++++-
 .../riscv/arcv-vdsp-vqrdot_vv-compile-1.c     |  56 ++++-
 .../riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c   |  56 ++++-
 .../riscv/arcv-vdsp-vqrdotu_vv-compile-1.c    |  56 ++++-
 .../riscv/arcv-vdsp-vsaddsub_vv-compile-1.c   |  84 +++++--
 .../riscv/arcv-vdsp-vsmulf_hv-compile-1.c     |   8 +-
 .../riscv/arcv-vdsp-vsmulf_hx-compile-1.c     |   8 +-
 .../riscv/arcv-vdsp-vsneg_v-compile-1.c       | 110 +++++++--
 .../riscv/arcv-vdsp-vsra_2s_vi-compile-1.c    |   1 -
 .../riscv/arcv-vdsp-vsra_s_vi-compile-1.c     |   1 -
 .../riscv/arcv-vdsp-vsrat_vi-compile-1.c      | 110 +++++++--
 .../riscv/arcv-vdsp-vsrat_vv-compile-1.c      | 110 +++++++--
 .../riscv/arcv-vdsp-vsrat_vx-compile-1.c      | 114 ++++++++--
 .../riscv/arcv-vdsp-vssabs_v-compile-1.c      | 110 +++++++--
 .../riscv/arcv-vdsp-vwmac_hv-compile-1.c      |  59 ++++-
 .../riscv/arcv-vdsp-vwmac_hx-compile-1.c      |  59 ++++-
 .../riscv/arcv-vdsp-vwmacu_hv-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwmacu_hx-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwmul_hv-compile-1.c      |  59 ++++-
 .../riscv/arcv-vdsp-vwmul_hx-compile-1.c      |  59 ++++-
 .../riscv/arcv-vdsp-vwmulf_hv-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwmulf_hx-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwmulu_hv-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwmulu_hx-compile-1.c     |  59 ++++-
 .../riscv/arcv-vdsp-vwrdot_hv-compile-1.c     |  56 ++++-
 .../riscv/arcv-vdsp-vwrdot_vv-compile-1.c     |  84 +++++--
 .../riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c   |  84 +++++--
 .../riscv/arcv-vdsp-vwrdotu_hv-compile-1.c    |  56 ++++-
 .../riscv/arcv-vdsp-vwrdotu_vv-compile-1.c    |  84 +++++--
 .../riscv/arcv-vdsp-vwsmac_vv-compile-1.c     |  87 ++++++--
 .../riscv/arcv-vdsp-vwsmac_vx-compile-1.c     |  87 ++++++--
 .../riscv/arcv-vdsp-vwsmacf_hv-compile-1.c    |  59 ++++-
 .../riscv/arcv-vdsp-vwsmacf_hx-compile-1.c    |  59 ++++-
 .../riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c   |  87 ++++++--
 .../riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c   |  87 ++++++--
 .../riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c  |  59 ++++-
 .../riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c  |  59 ++++-
 .../riscv/arcv-vdsp-vwsra_vi-compile-1.c      |  26 +--
 .../riscv/arcv-vdsp-vwsra_vv-compile-1.c      |  26 +--
 .../riscv/arcv-vdsp-vwsra_vx-compile-1.c      |  26 +--
 .../riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c |  84 +++++--
 .../riscv/arcv-vdsp-vwsrdot_vv-compile-1.c    |  84 +++++--
 .../riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c   |  56 ++++-
 .../riscv/arcv-vsad-vwsad_vv-compile-1.c      |  59 ++++-
 .../riscv/arcv-vsad-vwsadu_vv-compile-1.c     |  59 ++++-
 104 files changed, 4307 insertions(+), 1111 deletions(-)

diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md
index 1e887fe8dbf3..3420bff3d3e9 100644
--- a/gcc/config/riscv/arcv-vector.md
+++ b/gcc/config/riscv/arcv-vector.md
@@ -767,7 +767,7 @@
   "TARGET_XARCVVDSP"
   "arcv.vwsra.v%o4\t%0,%3,%4%p1"
   [(set_attr "type" "vsshift")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwsra_scalar"
   [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -791,7 +791,7 @@
   "TARGET_XARCVVDSP"
   "arcv.vwsra.v%o4\t%0,%3,%4%p1"
   [(set_attr "type" "vsshift")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_arcv_vaddsub"
   [(set (match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -876,9 +876,9 @@
              UNSPEC_ARCV_VQRDOT)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vqrdot.v%o3\t%0,%2,%3%p1"
+  "arcv.vqrdot.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_quad_widen_arcv_vqrdot_2s"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -898,9 +898,9 @@
              UNSPEC_ARCV_VQRDOT_2S)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vqrdot.2s.v%o3\t%0,%2,%3%p1"
+  "arcv.vqrdot.2s.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwsrdot_2s"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -920,9 +920,9 @@
              UNSPEC_ARCV_VWSRDOT_2S)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwsrdot.2s.v%o3\t%0,%2,%3%p1"
+  "arcv.vwsrdot.2s.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_quad_widen_arcv_vqrdotu"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -942,9 +942,9 @@
             UNSPEC_ARCV_VQRDOTU)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vqrdotu.v%o3\t%0,%2,%3%p1"
+  "arcv.vqrdotu.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_quad_widen_arcv_vqrdotsu"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -964,9 +964,9 @@
             UNSPEC_ARCV_VQRDOTSU)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vqrdotsu.v%o3\t%0,%2,%3%p1"
+  "arcv.vqrdotsu.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwrdot"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -986,9 +986,9 @@
             UNSPEC_ARCV_VWRDOT)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwrdot.v%o3\t%0,%2,%3%p1"
+  "arcv.vwrdot.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_half_arcv_vwrdot"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -1009,9 +1009,9 @@
             UNSPEC_ARCV_VWRDOT)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwrdot.h%o3\t%0,%2,%3%p1"
+  "arcv.vwrdot.h%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwsrdot"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -1031,9 +1031,9 @@
             UNSPEC_ARCV_VWSRDOT)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwsrdot.v%o3\t%0,%2,%3%p1"
+  "arcv.vwsrdot.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwrdotu"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -1053,9 +1053,9 @@
             UNSPEC_ARCV_VWRDOTU)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwrdotu.v%o3\t%0,%2,%3%p1"
+  "arcv.vwrdotu.v%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_half_arcv_vwrdotu"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -1076,9 +1076,9 @@
             UNSPEC_ARCV_VWRDOTU)
           (match_dup 2)))]
   "TARGET_XARCVVDSP"
-  "arcv.vwrdotu.h%o3\t%0,%2,%3%p1"
+  "arcv.vwrdotu.h%o3\t%0,%3,%4%p1"
   [(set_attr "type" "viwmuladd")
-   (set_attr "mode" "")])
+   (set_attr "mode" "")])
 
 (define_insn "@pred_widen_arcv_vwrdotsu"
   [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr")
@@ -1098,9 +1098,9 @@
             UNSPEC_ARCV_VWRDOTSU)
           (match_dup 2)))]
"TARGET_XARCVVDSP" - "arcv.vwrdotsu.v%o3\t%0,%2,%3%p1" + "arcv.vwrdotsu.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1122,9 +1122,9 @@ UNSPEC_ARCV_VWSMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmac.v%o3\t%0,%2,%3%p1" + "arcv.vwsmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1145,9 +1145,9 @@ UNSPEC_ARCV_VWSMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmac.v%o3\t%0,%2,%3%p1" + "arcv.vwsmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1169,9 +1169,9 @@ UNSPEC_ARCV_VWSNMSAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwsnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1192,9 +1192,9 @@ UNSPEC_ARCV_VWSNMSAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwsnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1217,7 +1217,7 @@ "TARGET_XARCVVDSP" "arcv.vwmul.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1239,7 +1239,7 @@ "TARGET_XARCVVDSP" "arcv.vwmul.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmac" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1261,9 +1261,9 @@ UNSPEC_ARCV_VWMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmac.h%o3\t%0,%2,%3%p1" + "arcv.vwmac.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmac_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1284,9 +1284,9 @@ UNSPEC_ARCV_VWMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmac.h%o3\t%0,%2,%3%p1" + "arcv.vwmac.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1309,7 +1309,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulu.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1331,7 +1331,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulu.h%o4\t%0,%3,%4%p1" [(set_attr "type" 
"viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmacu" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1353,9 +1353,9 @@ UNSPEC_ARCV_VWMACU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmacu.h%o3\t%0,%2,%3%p1" + "arcv.vwmacu.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmacu_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1376,9 +1376,9 @@ UNSPEC_ARCV_VWMACU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmacu.h%o3\t%0,%2,%3%p1" + "arcv.vwmacu.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_half_arcv_vsmulf" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1449,7 +1449,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulf.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1471,7 +1471,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulf.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsmacf" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1493,9 +1493,9 @@ UNSPEC_ARCV_VWSMACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsmacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsmacf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1516,9 +1516,9 @@ UNSPEC_ARCV_VWSMACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsmacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsnmsacf" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1540,9 +1540,9 @@ UNSPEC_ARCV_VWSNMSACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsnmsacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsnmsacf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1563,9 +1563,9 @@ UNSPEC_ARCV_VWSNMSACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsnmsacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsrdotf" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1586,9 +1586,9 @@ UNSPEC_ARCV_VWSRDOTF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsrdotf.h%o3\t%0,%2,%3%p1" + "arcv.vwsrdotf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_arcv_vconj" [(set (match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1771,7 +1771,7 @@ "TARGET_XARCVVCPLX" "arcv.vwcredsum.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - 
(set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_arcv_vscmul" [(set (match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1886,7 +1886,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmul_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1908,7 +1908,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1931,7 +1931,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1953,7 +1953,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1975,9 +1975,9 @@ UNSPEC_ARCV_VWSCMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1998,9 +1998,9 @@ UNSPEC_ARCV_VWSCMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2022,9 +2022,9 @@ UNSPEC_ARCV_VWSCNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2045,9 +2045,9 @@ UNSPEC_ARCV_VWSCNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2069,9 +2069,9 @@ UNSPEC_ARCV_VWSCJMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2092,9 +2092,9 @@ UNSPEC_ARCV_VWSCJMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn 
"@pred_widen_arcv_vwscjnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2116,9 +2116,9 @@ UNSPEC_ARCV_VWSCJNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2139,9 +2139,9 @@ UNSPEC_ARCV_VWSCJNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2161,9 +2161,9 @@ UNSPEC_ARCV_VWSCRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwscrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2183,9 +2183,9 @@ UNSPEC_ARCV_VWSCJRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwscjrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqcrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2205,9 +2205,9 @@ UNSPEC_ARCV_VQCRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vqcrdot.v%o3\t%0,%2,%3%p1" + "arcv.vqcrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqcjrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2227,9 +2227,9 @@ UNSPEC_ARCV_VQCJRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vqcjrdot.v%o3\t%0,%2,%3%p1" + "arcv.vqcjrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsad" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2252,7 +2252,7 @@ "TARGET_XARCVVSAD" "arcv.vwsad.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsadu" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2275,7 +2275,7 @@ "TARGET_XARCVVSAD" "arcv.vwsadu.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2297,9 +2297,9 @@ UNSPEC_ARCV_VQMXM4) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2321,9 +2321,9 @@ UNSPEC_ARCV_VQMXM4U) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + 
(set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2345,9 +2345,9 @@ UNSPEC_ARCV_VQMXM4SU) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2369,9 +2369,9 @@ UNSPEC_ARCV_VQMXM8) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2393,9 +2393,9 @@ UNSPEC_ARCV_VQMXM8U) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2417,9 +2417,9 @@ UNSPEC_ARCV_VQMXM8SU) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2441,9 +2441,9 @@ UNSPEC_ARCV_VQMXM16) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2465,9 +2465,9 @@ UNSPEC_ARCV_VQMXM16U) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2489,6 +2489,6 @@ UNSPEC_ARCV_VQMXM16SU) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c index b659855823b8..7beb1af88959 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c @@ -1,13 +1,31 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m4_t test_vqmxm4_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vqmxm4_vv_i32m4 (vd, vs1, vs2, vl); } -vint32m4_t test_vqmxm4_vv_i8_m (vbool8_t mask, 
vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vqmxm4_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm4\\.vv" 2 } } */ \ No newline at end of file +/* +** test_vqmxm4_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m4_t test_vqmxm4_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqmxm4_vv_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_vqmxm4_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m4_t test_vqmxm4_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqmxm4_vv_i32m4_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c index dab969dcbbc1..60dab65c69a6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c @@ -1,13 +1,31 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m4_t test_vqmxm4su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vqmxm4su_vv_i32m4 (vd, vs1, vs2, vl); } -vint32m4_t test_vqmxm4su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vqmxm4su_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm4su\\.vv" 2 } } */ \ No newline at end of file +/* +** test_vqmxm4su_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m4_t test_vqmxm4su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqmxm4su_vv_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_vqmxm4su_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m4_t test_vqmxm4su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqmxm4su_vv_i32m4_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c index c5f506b8f184..29fc46b30dd2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c @@ -1,13 +1,31 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ 
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m4_t test_vqmxm4u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm4u_vv_u32m4 (vd, vs1, vs2, vl); }
-vuint32m4_t test_vqmxm4u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm4u_vv_u32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm4u\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm4u_vv_u8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm4u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m4_t test_vqmxm4u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm4u_vv_u32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm4u_vv_u8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm4u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m4_t test_vqmxm4u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm4u_vv_u32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c
index 10eb15f6c4fe..4652f6c67128 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmc } */
-/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m4_t test_vqmxm8_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8_vv_i32m4 (vd, vs1, vs2, vl); }
-vint32m4_t test_vqmxm8_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8_vv_i32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm8\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm8_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m4_t test_vqmxm8_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8_vv_i32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm8_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m4_t test_vqmxm8_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8_vv_i32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c
index 4acd9cafbb4f..7ab08fc4b1ab 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmc } */
-/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m4_t test_vqmxm8su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8su_vv_i32m4 (vd, vs1, vs2, vl); }
-vint32m4_t test_vqmxm8su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8su_vv_i32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm8su\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm8su_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m4_t test_vqmxm8su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8su_vv_i32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm8su_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m4_t test_vqmxm8su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8su_vv_i32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c
index 54662c1d2e35..b5fa648c207e 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmc } */
-/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m4_t test_vqmxm8u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8u_vv_u32m4 (vd, vs1, vs2, vl); }
-vuint32m4_t test_vqmxm8u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm8u_vv_u32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm8u\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm8u_vv_u8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m4_t test_vqmxm8u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8u_vv_u32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm8u_vv_u8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm8u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m4_t test_vqmxm8u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm8u_vv_u32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c
index f7fa2e273e92..a65f3582947c 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmd } */
-/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m4_t test_vqmxm16_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16_vv_i32m4 (vd, vs1, vs2, vl); }
-vint32m4_t test_vqmxm16_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16_vv_i32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm16\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm16_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m4_t test_vqmxm16_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16_vv_i32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm16_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m4_t test_vqmxm16_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16_vv_i32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c
index 1bd6741c1314..80d7b8dfba06 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmd } */
-/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m4_t test_vqmxm16su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16su_vv_i32m4 (vd, vs1, vs2, vl); }
-vint32m4_t test_vqmxm16su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16su_vv_i32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm16su\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm16su_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m4_t test_vqmxm16su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16su_vv_i32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm16su_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m4_t test_vqmxm16su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16su_vv_i32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c
index ed6907994c85..b7013cf4bf87 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_mxmd } */
-/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m4_t test_vqmxm16u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16u_vv_u32m4 (vd, vs1, vs2, vl); }
-vuint32m4_t test_vqmxm16u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqmxm16u_vv_u32m4_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqmxm16u\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqmxm16u_vv_u8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m4_t test_vqmxm16u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16u_vv_u32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqmxm16u_vv_u8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqmxm16u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m4_t test_vqmxm16u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqmxm16u_vv_u32m4_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c
index 63d43dfe0b87..ae26b98f4343 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vcmuli_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmuli_v_i16m1 (vs2, vl); }
-vint16m1_t test_vcmuli_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmuli_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vcmuli_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmuli_v_i32m1 (vs2, vl); }
-vint32m1_t test_vcmuli_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmuli_v_i32m1_m (mask, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vcmuli\\.v" 4 } } */
\ No newline at end of file
+/*
+** test_vcmuli_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vcmuli_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmuli_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vcmuli_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vcmuli_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmuli_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vcmuli_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vcmuli_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmuli_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vcmuli_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vcmuli_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmuli_v_i32m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c
index 203ca992ec5c..c519a9edfc85 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vcmulni_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmulni_v_i16m1 (vs2, vl); }
-vint16m1_t test_vcmulni_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmulni_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vcmulni_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmulni_v_i32m1 (vs2, vl); }
-vint32m1_t test_vcmulni_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vcmulni_v_i32m1_m (mask, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vcmulni\\.v" 4 } } */
\ No newline at end of file
+/*
+** test_vcmulni_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vcmulni_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmulni_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vcmulni_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vcmulni_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmulni_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vcmulni_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vcmulni_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmulni_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vcmulni_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vcmulni_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vcmulni_v_i32m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c
index 78457d2de257..99235917e572 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vconj_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vconj_v_i16m1 (vs2, vl); }
-vint16m1_t test_vconj_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vconj_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vconj_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vconj_v_i32m1 (vs2, vl); }
-vint32m1_t test_vconj_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vconj_v_i32m1_m (mask, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vconj\\.v" 4 } } */
\ No newline at end of file
+/*
+** test_vconj_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vconj_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vconj_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vconj_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vconj_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vconj_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vconj_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vconj_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vconj_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vconj_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vconj_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vconj_v_i32m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c
index 55cf43400459..adb41cf1fb32 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_veven_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_veven_v_i16m1 (vs2, vl); }
-vint16m1_t test_veven_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_veven_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_veven_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_veven_v_i32m1 (vs2, vl); }
-vint32m1_t test_veven_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_veven_v_i32m1_m (mask, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.veven\\.v" 4 } } */
\ No newline at end of file
+/*
+** test_veven_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_veven_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_veven_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_veven_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_veven_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_veven_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_veven_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_veven_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_veven_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_veven_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_veven_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_veven_v_i32m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c
index 2741c8a25704..3e82bff99c49 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vinterleave_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vinterleave_vv_i16m1 (vs2, vs1, vl); }
-vint16m1_t test_vinterleave_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vinterleave_vv_i16m1_m (mask, vs2, vs1, vl); }
-vint32m1_t test_vinterleave_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vinterleave_vv_i32m1 (vs2, vs1, vl); }
-vint32m1_t test_vinterleave_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vinterleave_vv_i32m1_m (mask, vs2, vs1, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vinterleave\\.vv" 4 } } */
\ No newline at end of file
+/*
+** test_vinterleave_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vinterleave_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vinterleave_vv_i16m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vinterleave_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vinterleave_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vinterleave_vv_i16m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vinterleave_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vinterleave_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vinterleave_vv_i32m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vinterleave_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vinterleave_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vinterleave_vv_i32m1_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c
index 092ed67f92c9..cee3d6b64e62 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vodd_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vodd_v_i16m1 (vs2, vl); }
-vint16m1_t test_vodd_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vodd_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vodd_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vodd_v_i32m1 (vs2, vl); }
-vint32m1_t test_vodd_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vodd_v_i32m1_m (mask, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vodd\\.v" 4 } } */
\ No newline at end of file
+/*
+** test_vodd_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vodd_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vodd_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vodd_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vodd_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vodd_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vodd_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vodd_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vodd_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vodd_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vodd_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vodd_v_i32m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c
index 5e86b1875435..081206d1ef0c 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c
@@ -1,13 +1,31 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vcplx } */
-/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint64m1_t test_vqcjrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vqcjrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vqcjrdot\\.vv" 2 } } */
\ No newline at end of file
+/*
+** test_vqcjrdot_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqcjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vqcjrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqcjrdot_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqcjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vqcjrdot_vv_i16_m (vbool16_t
mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c index e96cb9d37702..33c4150d4808 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c @@ -1,13 +1,31 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint64m1_t test_vqcrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vqcrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vqcrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vqcrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqcrdot\\.vv" 2 } } */ \ No newline at end of file +/* +** test_vqcrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vqcrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqcrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vqcrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vqcrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vqcrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c index c36118f3b2c2..bc5ea0f300a1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c @@ -1,21 +1,75 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vscredsum_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i16m1_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vscredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i16m1_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vscredsum_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i32m1_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vscredsum_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i32m1_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vscredsum_vv_i64 
(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i64m1_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vscredsum_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vscredsum_vv_i64m1_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vscredsum\\.vv" 6 } } */ \ No newline at end of file + +/* +** test_vscredsum_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint16m1_t test_vscredsum_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i16m1_i16m1 (vs2, vs1, vl); +} + +/* +** test_vscredsum_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint16m1_t test_vscredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i16m1_i16m1_m (mask, vs2, vs1, vl); +} + +/* +** test_vscredsum_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vscredsum_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i32m1_i32m1 (vs2, vs1, vl); +} + +/* +** test_vscredsum_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vscredsum_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i32m1_i32m1_m (mask, vs2, vs1, vl); +} + +/* +** test_vscredsum_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vscredsum_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i64m1_i64m1 (vs2, vs1, vl); +} + +/* +** test_vscredsum_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vscredsum_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscredsum_vv_i64m1_i64m1_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c index 5306b409afa6..37b70b72a303 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c @@ -1,17 +1,53 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } 
*/ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m1_t test_vwcredsum_vv_i16 (vint16m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwcredsum_vv_i16m1_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vwcredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwcredsum_vv_i16m1_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vwcredsum_vv_i32 (vint32m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vwcredsum_vv_i32m1_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vwcredsum_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vwcredsum_vv_i32m1_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwcredsum\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwcredsum_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwcredsum_vv_i16 (vint16m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwcredsum_vv_i16m1_i32m1 (vs2, vs1, vl); +} + +/* +** test_vwcredsum_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwcredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwcredsum_vv_i16m1_i32m1_m (mask, vs2, vs1, vl); +} + +/* +** test_vwcredsum_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwcredsum_vv_i32 (vint32m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwcredsum_vv_i32m1_i64m1 (vs2, vs1, vl); +} + +/* +** test_vwcredsum_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwcredsum_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwcredsum_vv_i32m1_i64m1_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c index fe40b491d4e1..2ef924219d55 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, 
vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjmac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmac\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscjmac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c index cf2a68d1b458..dfc3e6c427ad 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjmac_vx_i16: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmac\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscjmac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c index 16472ab28d2a..737a6bd4f358 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscjmul_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i32m2 (vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmul\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscjmul_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
(?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscjmul_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i64m2_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c index e198956945f2..55b8621b2eaa 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscjmul_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i32m2 (vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmul\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscjmul_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return 
__riscv_arcv_vwscjmul_vx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscjmul_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i64m2_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c index 0555b1b2c2a3..2b0057474447 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjnmsac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjnmsac\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscjnmsac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjnmsac_vv_i32 (vint64m2_t vd, 
vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c index 7cf4569cba8d..390d2507137b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjnmsac\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscjnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff 
--git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c index 16a204b9687f..eb3a14feea2a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c @@ -1,17 +1,53 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m1_t test_vwscjrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } -vint32m1_t test_vwscjrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } -vint64m1_t test_vwscjrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vwscjrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwscjrdot\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscjrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwscjrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwscjrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwscjrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwscjrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c index f128d8c0342c..568d18859827 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscmac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmac\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscmac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c index 5f5399bf1800..10e8a38d13e6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ 
#include #include -vint32m2_t test_vwscmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscmac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmac\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscmac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c index 264acb49d5a2..536e8780e08a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i64m2_m (mask, vs2, vs1, vl); } +/* +** 
test_vwscmul_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i32m2 (vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmul\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscmul_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscmul_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i64m2_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c index 9137945bfa4c..bf04feee90f8 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscmul_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscmul_vx_i16 
(vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i32m2 (vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmul\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscmul_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscmul_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i64m2_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c index 41e62b257327..6862943a98d5 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscnmsac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscnmsac\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscnmsac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c index 21782f26c04a..ab6215efe0c9 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscnmsac\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vwscnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) 
+** ret +*/ +vint64m2_t +test_vwscnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c index 88362dd950c0..041994226a7b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c @@ -1,17 +1,53 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m1_t test_vwscrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } -vint32m1_t test_vwscrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } -vint64m1_t test_vwscrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vwscrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwscrdot\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vwscrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwscrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwscrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwscrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwscrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwscrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwscrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c index 2b169c20361d..2c5f0c609416 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c @@ -1,21 +1,75 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vaddsub_vv_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vaddsub\\.vv" 6 } } */ \ No newline at end of file + +/* +** test_vaddsub_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint16m1_t test_vaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i16m1 (vs2, vs1, vl); +} + +/* +** test_vaddsub_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint16m1_t test_vaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i16m1_m (mask, vs2, vs1, vl); +} + +/* +** test_vaddsub_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i32m1 (vs2, vs1, vl); +} + +/* +** test_vaddsub_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i32m1_m (mask, vs2, vs1, vl); +} + +/* +** test_vaddsub_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i64m1 (vs2, vs1, vl); +} + +/* +** test_vaddsub_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vaddsub_vv_i64m1_m (mask, vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c index 491357af00a0..7a1a9388805e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c @@ -1,25 +1,97 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vclr_v_i_i8 (vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i8m1 (vs2, 1, vl); } -vint8m1_t test_vclr_v_i_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vclr_v_i_i16 (vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i16m1 (vs2, 1, vl); } -vint16m1_t test_vclr_v_i_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vclr_v_i_i32 (vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i32m1 (vs2, 1, vl); } -vint32m1_t test_vclr_v_i_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i32m1_m (mask, vs2, 1, vl); } -vint64m1_t test_vclr_v_i_i64 (vint64m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i64m1 (vs2, 1, vl); } -vint64m1_t test_vclr_v_i_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) { - return __riscv_arcv_vclr_v_i_i64m1_m (mask, vs2, 1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vclr\\.v\\.i" 8 } } */ \ No newline at end of file + +/* +** test_vclr_v_i_i8: +** vsetivli zero,1,e8,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ +vint8m1_t test_vclr_v_i_i8 (vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vclr_v_i_i8m1 (vs2, 1, vl); +} + +/* +** test_vclr_v_i_i8_m: +** vsetivli zero,1,e8,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint8m1_t test_vclr_v_i_i8_m (vbool8_t mask, vint8m1_t 
vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i8m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i16:
+** vsetivli zero,1,e16,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint16m1_t test_vclr_v_i_i16 (vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i16m1 (vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i16_m:
+** vsetivli zero,1,e16,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vclr_v_i_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i16m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i32:
+** vsetivli zero,1,e32,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint32m1_t test_vclr_v_i_i32 (vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i32m1 (vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i32_m:
+** vsetivli zero,1,e32,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vclr_v_i_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i32m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i64:
+** vsetivli zero,1,e64,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint64m1_t test_vclr_v_i_i64 (vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i64m1 (vs2, 1, vl);
+}
+
+/*
+** test_vclr_v_i_i64_m:
+** vsetivli zero,1,e64,m1,ta,ma
+** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vclr_v_i_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vclr_v_i_i64m1_m (mask, vs2, 1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c
index c37b480f6a17..d51bf6907910 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c
@@ -1,23 +1,79 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 #include <stdint.h>
 #include <riscv_vector.h>
-vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, vs1, vs2, vl); }
-vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, vs1, vs2, vl); }
-vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, vs1, vs2, vl); }
-vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vmv_s_v_i32m1 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, vs1, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vmv\\.s\\.v" 7 } } */
+
+/*
+** test_vmv_s_v_i8m1:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i8m2:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i8m4:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i16m1:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i32m1:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m1_t test_vmv_s_v_i32m1 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i32m2:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i64m1:
+** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, vs1, vs2, vl);
+}
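For orientation, the vmv.s.v checks above only pin down the mnemonic and the operand order (vector destination, vector source, scalar operand in a GPR); they deliberately do not require a vsetvli, since none is emitted for this pattern. A minimal strip-mined caller might look like the sketch below; the function name demo_vmv_s_v, the buffers, and the loop shape are illustrative assumptions, and only the intrinsic call itself is taken from the tests.

/* Hypothetical usage sketch, not asserting arcv.vmv.s.v semantics:
   load a tail of dst and src each iteration, apply the ARC-V
   intrinsic exercised above, and store the result back.  */
#include <stdint.h>
#include <riscv_vector.h>

void
demo_vmv_s_v (int32_t *dst, const int32_t *src, int sel, size_t n)
{
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl)
    {
      vl = __riscv_vsetvl_e32m1 (n);
      vint32m1_t vd = __riscv_vle32_v_i32m1 (dst, vl);
      vint32m1_t vs2 = __riscv_vle32_v_i32m1 (src, vl);
      vd = __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, sel, vs2, vl);
      __riscv_vse32_v_i32m1 (dst, vd, vl);
    }
}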
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c
index 647596597107..3b913981a510 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c
@@ -1,17 +1,49 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 #include <stdint.h>
 #include <riscv_vector.h>
-vint8m1_t test_vmv_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i8m1 (vd, vs1, vs2, vl); }
-vint16m1_t test_vmv_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i16m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vmv_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i32m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vmv_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i64m1 (vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vmv\\.v\\.s" 4 } } */
\ No newline at end of file
+/*
+** test_vmv_v_s_i8:
+** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint8m1_t test_vmv_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i8m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_v_s_i16:
+** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint16m1_t test_vmv_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i16m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_v_s_i32:
+** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m1_t test_vmv_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vmv_v_s_i64:
+** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint64m1_t test_vmv_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i64m1 (vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c
index b63911b16bb4..aa246be812de 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c
@@ -1,23 +1,79 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 #include <stdint.h>
 #include <riscv_vector.h>
-vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, 1, vs2, vl); }
-vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, 1, vs2, vl); }
-vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, 1, vs2, vl); }
-vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, 1, vs2, vl); }
-vint32m1_t test_vmv_s_v_i32m1 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, 1, vs2, vl); }
-vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, 1, vs2, vl); }
-vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, 1, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vmvi\\.s\\.v" 7 } } */
+
+/*
+** test_vmv_s_v_i8m1:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i8m2:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i8m4:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i16m1:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i32m1:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint32m1_t test_vmv_s_v_i32m1 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i32m2:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmv_s_v_i64m1:
+** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, 1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c
index 4e5bc7a3972a..b40faeb73a27 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c
@@ -1,17 +1,49 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 #include <stdint.h>
 #include <riscv_vector.h>
-vint8m1_t test_vmvi_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i8m1 (vd, 1, vs2, vl); }
-vint16m1_t test_vmvi_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i16m1 (vd, 1, vs2, vl); }
-vint32m1_t test_vmvi_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i32m1 (vd, 1, vs2, vl); }
-vint64m1_t test_vmvi_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vmv_v_s_i64m1 (vd, 1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vmvi\\.v\\.s" 4 } } */
\ No newline at end of file
+/*
+** test_vmvi_v_s_i8:
+** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint8m1_t test_vmvi_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i8m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmvi_v_s_i16:
+** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint16m1_t test_vmvi_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i16m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmvi_v_s_i32:
+** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint32m1_t test_vmvi_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i32m1 (vd, 1, vs2, vl);
+}
+
+/*
+** test_vmvi_v_s_i64:
+** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint64m1_t test_vmvi_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vmv_v_s_i64m1 (vd, 1, vs2, vl);
+}
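Note that the vmvi.v.s file above drives the same __riscv_arcv_vmv_v_s_* intrinsics as the earlier vmv.v.s file; the only difference is the literal 1 in the scalar position, which is what selects the immediate arcv.vmvi.v.s encoding over the register form. A hedged sketch of the two call shapes follows; demo_vmv_forms and its arguments are assumptions for illustration, not part of the patch.

/* Hypothetical sketch contrasting the register and immediate
   selections of the same intrinsic.  */
#include <stdint.h>
#include <riscv_vector.h>

vint32m1_t
demo_vmv_forms (vint32m1_t vd, vint32m1_t vs2, int sel, size_t vl)
{
  /* Variable scalar operand: the tests expect arcv.vmv.v.s.  */
  vd = __riscv_arcv_vmv_v_s_i32m1 (vd, sel, vs2, vl);
  /* Constant operand in the immediate range: the tests expect
     arcv.vmvi.v.s.  */
  vd = __riscv_arcv_vmv_v_s_i32m1 (vd, 1, vs2, vl);
  return vd;
}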
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c
index eb94e394e37a..05bfcce60487 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c
@@ -1,21 +1,75 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 #include <stdint.h>
 #include <riscv_vector.h>
-vint16m1_t test_vnorm_v_i16 (vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i16m1 (vs2, vl); }
-vint16m1_t test_vnorm_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vnorm_v_i32 (vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i32m1 (vs2, vl); }
-vint32m1_t test_vnorm_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i32m1_m (mask, vs2, vl); }
-vint64m1_t test_vnorm_v_i64 (vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i64m1 (vs2, vl); }
-vint64m1_t test_vnorm_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) {
- return __riscv_arcv_vnorm_v_i64m1_m (mask, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vnorm\\.v" 6 } } */
\ No newline at end of file
+
+/*
+** test_vnorm_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vnorm_v_i16 (vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vnorm_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vnorm_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vnorm_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vnorm_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vnorm_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vnorm_v_i32 (vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vnorm_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vnorm_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vnorm_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+ return __riscv_arcv_vnorm_v_i32m1_m (mask, vs2,
vl); +} + +/* +** test_vnorm_v_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vnorm_v_i64 (vint64m1_t vs2, size_t vl) +{ + return __riscv_arcv_vnorm_v_i64m1 (vs2, vl); +} + +/* +** test_vnorm_v_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vnorm_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) +{ + return __riscv_arcv_vnorm_v_i64m1_m (mask, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c index bcae9fb816ad..9cad20aaa329 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_2s_qi_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_2s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_qi_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_2s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_qi_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_2s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_2s_qi_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c index 4c82ae0d7283..12325d06dc82 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_2s_qv_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** 
(?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_2s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_qv_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_2s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_qv_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_2s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_2s_qv_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c index 6da8cd4aeedc..4df8656b8129 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c @@ -4,7 +4,6 @@ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ /* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -12,7 +11,7 @@ ** test_vnsra_2s_qx_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -25,7 +24,7 @@ test_vnsra_2s_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_2s_qx_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -38,7 +37,7 @@ test_vnsra_2s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_2s_qx_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** 
(?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -51,7 +50,7 @@ test_vnsra_2s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) ** test_vnsra_2s_qx_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c index afa9a8ddbc3e..3818acec263a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c @@ -4,7 +4,6 @@ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ /* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -12,7 +11,7 @@ ** test_vnsra_2s_wi_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -25,7 +24,7 @@ test_vnsra_2s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_wi_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -38,7 +37,7 @@ test_vnsra_2s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_wi_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -51,7 +50,7 @@ test_vnsra_2s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_2s_wi_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t @@ -64,7 +63,7 @@ test_vnsra_2s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t v ** test_vnsra_2s_wi_i32: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** 
(?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m1_t @@ -77,7 +76,7 @@ test_vnsra_2s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) ** test_vnsra_2s_wi_i32_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c index 9b74d8c87601..c5777836431f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_2s_wv_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_2s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_wv_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_2s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_2s_wv_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_2s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_2s_wv_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t @@ -63,7 +63,7 @@ test_vnsra_2s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t v ** test_vnsra_2s_wv_i32: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m1_t @@ -76,7 
+76,7 @@ test_vnsra_2s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) ** test_vnsra_2s_wv_i32_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c index 58c1a153a204..7a2a1f1ea242 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_2s_wx_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_2s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) ** test_vnsra_2s_wx_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_2s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) ** test_vnsra_2s_wx_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_2s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) ** test_vnsra_2s_wx_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t @@ -63,7 +63,7 @@ test_vnsra_2s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) ** test_vnsra_2s_wx_i32: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m1_t @@ -76,7 +76,7 @@ test_vnsra_2s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) ** test_vnsra_2s_wx_i32_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] -** 
arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c index 4344de081c18..0b1aee52f2e7 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_qi_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_qi_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_qi_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_qi_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c index 5af504ecd68c..0eb13f7b9e61 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_qv_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_qv_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** 
arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_qv_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_qv_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c index 7904a8f7b031..25d4935aee31 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_qx_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_qx_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_qx_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) ** test_vnsra_qx_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t 
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c index 7519dd41d6f2..92de85bd618d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_s_qi_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_s_qi_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_s_qi_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_s_qi_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c index 25e2e9d23d5f..d5bcb08bb9aa 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_s_qv_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_s_qv_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 
+37,7 @@ test_vnsra_s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) ** test_vnsra_s_qv_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) ** test_vnsra_s_qv_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c index 3404a8b16054..a8c858f47560 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c @@ -11,7 +11,7 @@ ** test_vnsra_s_qx_i8: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint8m1_t @@ -24,7 +24,7 @@ test_vnsra_s_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_s_qx_i8_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint8m1_t @@ -37,7 +37,7 @@ test_vnsra_s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) ** test_vnsra_s_qx_i16: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint16m1_t @@ -50,7 +50,7 @@ test_vnsra_s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) ** test_vnsra_s_qx_i16_m: ** csrwi\s+vxrm,0 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] -** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m1_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c index b08c9d546e52..bf26949d7e5a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c +++ 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_s_wi_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_s_wi_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_s_wi_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_s_wi_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl
 ** test_vnsra_s_wi_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl)
 ** test_vnsra_s_wi_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c
index 5a4ed39d9777..0ddb2dfc4c20 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_s_wv_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_s_wv_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_s_wv_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_s_wv_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl
 ** test_vnsra_s_wv_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl)
 ** test_vnsra_s_wv_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c
index 36557ff9de42..f9ebbe01b57b 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_s_wx_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_s_wx_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_s_wx_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_s_wx_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_s_wx_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_s_wx_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c
index 0f9688ad4ee1..d126e2c01d40 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_wi_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_wi_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_wi_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_wi_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_wi_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl)
 ** test_vnsra_wi_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c
index eee3aefb232a..b54778ea5cf3 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_wv_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_wv_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl)
 ** test_vnsra_wv_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_wv_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vnsra_wv_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl)
 ** test_vnsra_wv_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c
index 57c54c9997f7..e7293cf148a2 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vnsra_wx_i8:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint8m1_t
@@ -24,7 +24,7 @@ test_vnsra_wx_i8 (vint16m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_wx_i8_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint8m1_t
@@ -37,7 +37,7 @@ test_vnsra_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_wx_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -50,7 +50,7 @@ test_vnsra_wx_i16 (vint32m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_wx_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -63,7 +63,7 @@ test_vnsra_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_wx_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -76,7 +76,7 @@ test_vnsra_wx_i32 (vint64m2_t vs2, int vs1, size_t vl)
 ** test_vnsra_wx_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c
index 5e58129ebe3c..cf1d93ab2418 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m1_t test_vqrdot_2s_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vqrdot_2s_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdot_2s_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdot_2s_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); }
 
-/* { dg-final { scan-assembler-times "arcv\\.vqrdot\\.2s\\.vv" 4 } } */
\ No newline at end of file
+/*
+** test_vqrdot_2s_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vqrdot_2s_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_2s_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vqrdot_2s_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_2s_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vqrdot_2s_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_2s_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vqrdot_2s_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c
index 08c74c4a45c0..f8cdb595e109 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m1_t test_vqrdot_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_vv_i8m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vqrdot_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); }
 
-/* { dg-final { scan-assembler-times "arcv\\.vqrdot\\.vv" 4 } } */
\ No newline at end of file
+/*
+** test_vqrdot_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vqrdot_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_vv_i8m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vqrdot_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vqrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdot_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vqrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c
index 712f37aab378..6d0bcb576aa4 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m1_t test_vqrdotsu_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vqrdotsu_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdotsu_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vqrdotsu_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); }
 
-/* { dg-final { scan-assembler-times "arcv\\.vqrdotsu\\.vv" 4 } } */
\ No newline at end of file
+/*
+** test_vqrdotsu_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m1_t test_vqrdotsu_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotsu_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m1_t test_vqrdotsu_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotsu_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m1_t test_vqrdotsu_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotsu_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m1_t test_vqrdotsu_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c
index ac65d1c2f786..f700fa0c6a78 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m1_t test_vqrdotu_vv_u8 (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotu_vv_u8m1_u32m1 (vd, vs1, vs2, vl); }
-vuint32m1_t test_vqrdotu_vv_u8_m (vbool8_t mask, vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotu_vv_u8m1_u32m1_m (mask, vd, vs1, vs2, vl); }
-vuint64m1_t test_vqrdotu_vv_u16 (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotu_vv_u16m1_u64m1 (vd, vs1, vs2, vl); }
-vuint64m1_t test_vqrdotu_vv_u16_m (vbool16_t mask, vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vqrdotu_vv_u16m1_u64m1_m (mask, vd, vs1, vs2, vl); }
 
-/* { dg-final { scan-assembler-times "arcv\\.vqrdotu\\.vv" 4 } } */
\ No newline at end of file
+/*
+** test_vqrdotu_vv_u8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m1_t test_vqrdotu_vv_u8 (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotu_vv_u8m1_u32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotu_vv_u8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m1_t test_vqrdotu_vv_u8_m (vbool8_t mask, vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotu_vv_u8m1_u32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotu_vv_u16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint64m1_t test_vqrdotu_vv_u16 (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotu_vv_u16m1_u64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vqrdotu_vv_u16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint64m1_t test_vqrdotu_vv_u16_m (vbool16_t mask, vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vqrdotu_vv_u16m1_u64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c
index 53a4bf8d6ee7..c532abb39125 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c
@@ -1,21 +1,75 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vsaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i16m1 (vs2, vs1, vl); }
-vint16m1_t test_vsaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i16m1_m (mask, vs2, vs1, vl); }
-vint32m1_t test_vsaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i32m1 (vs2, vs1, vl); }
-vint32m1_t test_vsaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i32m1_m (mask, vs2, vs1, vl); }
-vint64m1_t test_vsaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i64m1 (vs2, vs1, vl); }
-vint64m1_t test_vsaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsaddsub_vv_i64m1_m (mask, vs2, vs1, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vsaddsub\\.vv" 6 } } */
\ No newline at end of file
+
+/*
+** test_vsaddsub_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vsaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i16m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsaddsub_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vsaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i16m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsaddsub_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vsaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i32m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsaddsub_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vsaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i32m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsaddsub_vv_i64:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vsaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i64m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsaddsub_vv_i64_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vsaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsaddsub_vv_i64m1_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c
index 241676c6fe6a..f82ce0bf0253 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vsmulf_hv_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -24,7 +24,7 @@ test_vsmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vsmulf_hv_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -37,7 +37,7 @@ test_vsmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl)
 ** test_vsmulf_hv_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -50,7 +50,7 @@ test_vsmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl)
 ** test_vsmulf_hv_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c
index d196897eac4d..01ac02b8b48d 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c
@@ -11,7 +11,7 @@
 ** test_vsmulf_hx_i16:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint16m1_t
@@ -24,7 +24,7 @@ test_vsmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl)
 ** test_vsmulf_hx_i16_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint16m1_t
@@ -37,7 +37,7 @@ test_vsmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl)
 ** test_vsmulf_hx_i32:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
 ** ret
 */
 vint32m1_t
@@ -50,7 +50,7 @@ test_vsmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl)
 ** test_vsmulf_hx_i32_m:
 ** csrwi\s+vxrm,0
 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
-** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
 ** ret
 */
 vint32m1_t
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c
index dce4ab06539e..d8b3d475ab12 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c
@@ -1,25 +1,97 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint8m1_t test_vsneg_v_i8 (vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i8m1 (vs2, vl); }
-vint8m1_t test_vsneg_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i8m1_m (mask, vs2, vl); }
-vint16m1_t test_vsneg_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i16m1 (vs2, vl); }
-vint16m1_t test_vsneg_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vsneg_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i32m1 (vs2, vl); }
-vint32m1_t test_vsneg_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i32m1_m (mask, vs2, vl); }
-vint64m1_t test_vsneg_v_i64 (vint64m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i64m1 (vs2, vl); }
-vint64m1_t test_vsneg_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) {
-  return __riscv_arcv_vsneg_v_i64m1_m (mask, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vsneg\\.v" 8 } } */
\ No newline at end of file
+
+/*
+** test_vsneg_v_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint8m1_t test_vsneg_v_i8 (vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i8m1 (vs2, vl);
+}
+
+/*
+** test_vsneg_v_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint8m1_t test_vsneg_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i8m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vsneg_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vsneg_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vsneg_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vsneg_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vsneg_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vsneg_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vsneg_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vsneg_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i32m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vsneg_v_i64:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vsneg_v_i64 (vint64m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i64m1 (vs2, vl);
+}
+
+/*
+** test_vsneg_v_i64_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vsneg_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vsneg_v_i64m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c
index 1c68a6a9c031..6b4562ed1e92 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c
@@ -4,7 +4,6 @@
 /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
-
 #include <stdint.h>
 #include <riscv_vector.h>
 
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c
index 4060f48cb90c..c8b38ff0e5c0 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c
@@ -4,7 +4,6 @@
 /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
-
 #include <stdint.h>
 #include <riscv_vector.h>
 
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c
index 53860217102a..8c2978b065bd 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c
@@ -1,25 +1,97 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint8m1_t test_vsrat_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i8m1 (vs2, 1, vl); }
-vint8m1_t test_vsrat_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, 1, vl); }
-vint16m1_t test_vsrat_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i16m1 (vs2, 1, vl); }
-vint16m1_t test_vsrat_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, 1, vl); }
-vint32m1_t test_vsrat_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i32m1 (vs2, 1, vl); }
-vint32m1_t test_vsrat_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, 1, vl); }
-vint64m1_t test_vsrat_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i64m1 (vs2, 1, vl); }
-vint64m1_t test_vsrat_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, 1, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vi" 8 } } */
\ No newline at end of file
+
+/*
+** test_vsrat_vi_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint8m1_t test_vsrat_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i8m1 (vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint8m1_t test_vsrat_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint16m1_t test_vsrat_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i16m1 (vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vsrat_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint32m1_t test_vsrat_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i32m1 (vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vsrat_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i64:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1
+** ret
+*/
+vint64m1_t test_vsrat_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i64m1 (vs2, 1, vl);
+}
+
+/*
+** test_vsrat_vi_i64_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vsrat_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, 1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c
index 31e97bf93328..65c1cfe4f49d 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c
@@ -1,25 +1,97 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint8m1_t test_vsrat_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i8m1 (vs2, vs1, vl); }
-vint8m1_t test_vsrat_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i8m1_m (mask, vs2, vs1, vl); }
-vint16m1_t test_vsrat_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i16m1 (vs2, vs1, vl); }
-vint16m1_t test_vsrat_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i16m1_m (mask, vs2, vs1, vl); }
-vint32m1_t test_vsrat_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i32m1 (vs2, vs1, vl); }
-vint32m1_t test_vsrat_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i32m1_m (mask, vs2, vs1, vl); }
-vint64m1_t test_vsrat_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i64m1 (vs2, vs1, vl); }
-vint64m1_t test_vsrat_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vv_i64m1_m (mask, vs2, vs1, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vv" 8 } } */
\ No newline at end of file
+
+/*
+** test_vsrat_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint8m1_t test_vsrat_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i8m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint8m1_t test_vsrat_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i8m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vsrat_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i16m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vsrat_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i16m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vsrat_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i32m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vsrat_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i32m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i64:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vsrat_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i64m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vv_i64_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vsrat_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vv_i64m1_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c
index 84da17fed329..dddcc0442e83 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c
@@ -1,25 +1,101 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint8m1_t test_vsrat_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i8m1 (vs2, vs1, vl); }
-vint8m1_t test_vsrat_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, vs1, vl); }
-vint16m1_t test_vsrat_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i16m1 (vs2, vs1, vl); }
-vint16m1_t test_vsrat_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, vs1, vl); }
-vint32m1_t test_vsrat_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i32m1 (vs2, vs1, vl); }
-vint32m1_t test_vsrat_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, vs1, vl); }
-vint64m1_t test_vsrat_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i64m1 (vs2, vs1, vl); }
-vint64m1_t test_vsrat_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, vs1, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vx" 8 } } */
\ No newline at end of file
+
+/*
+** test_vsrat_vx_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint8m1_t test_vsrat_vx_i8 (vint8m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i8m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint8m1_t test_vsrat_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint16m1_t test_vsrat_vx_i16 (vint16m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i16m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vsrat_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m1_t test_vsrat_vx_i32 (vint32m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i32m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vsrat_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i64:
+** mv a4,a0
+** srai a5,a0,31
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint64m1_t test_vsrat_vx_i64 (vint64m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i64m1 (vs2, vs1, vl);
+}
+
+/*
+** test_vsrat_vx_i64_m:
+** mv a4,a0
+** srai a5,a0,31
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vsrat_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c
index e876a6646651..226b90fe1906 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c
@@ -1,25 +1,97 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint8m1_t test_vssabs_v_i8 (vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i8m1 (vs2, vl); }
-vint8m1_t test_vssabs_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i8m1_m (mask, vs2, vl); }
-vint16m1_t test_vssabs_v_i16 (vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i16m1 (vs2, vl); }
-vint16m1_t test_vssabs_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i16m1_m (mask, vs2, vl); }
-vint32m1_t test_vssabs_v_i32 (vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i32m1 (vs2, vl); }
-vint32m1_t test_vssabs_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i32m1_m (mask, vs2, vl); }
-vint64m1_t test_vssabs_v_i64 (vint64m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i64m1 (vs2, vl); }
-vint64m1_t test_vssabs_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) {
-  return __riscv_arcv_vssabs_v_i64m1_m (mask, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vssabs\\.v" 8 } } */
\ No newline at end of file
+
+/*
+** test_vssabs_v_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint8m1_t test_vssabs_v_i8 (vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i8m1 (vs2, vl);
+}
+
+/*
+** test_vssabs_v_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint8m1_t test_vssabs_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i8m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vssabs_v_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vssabs_v_i16 (vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i16m1 (vs2, vl);
+}
+
+/*
+** test_vssabs_v_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vssabs_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i16m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vssabs_v_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vssabs_v_i32 (vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i32m1 (vs2, vl);
+}
+
+/*
+** test_vssabs_v_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vssabs_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i32m1_m (mask, vs2, vl);
+}
+
+/*
+** test_vssabs_v_i64:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vssabs_v_i64 (vint64m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i64m1 (vs2, vl);
+}
+
+/*
+** test_vssabs_v_i64_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au]
+** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vssabs_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vssabs_v_i64m1_m (mask, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c
index 61ba3852f84d..010e72fcfed0 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwmac_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hv_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwmac_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hv_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwmac_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hv_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwmac_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hv_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwmac_hv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwmac_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwmac_hv_i32m2 (vd, vs1, vs2, vl);
+}
 
-/* { dg-final { scan-assembler-times "arcv\\.vwmac\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwmac_hv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwmac_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwmac_hv_i32m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwmac_hv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwmac_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwmac_hv_i64m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwmac_hv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwmac_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwmac_hv_i64m2_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c
index a2da935e1f2e..0e1eca934269 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwmac_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hx_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwmac_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hx_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwmac_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hx_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwmac_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwmac_hx_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwmac_hx_i16:
+**
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwmac_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmac\\.hx" 4 } } */ \ No newline at end of file +/* +** test_vwmac_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmac_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwmac_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmac_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c index 57f8b7450b84..3364fb85d733 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmacu_hv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwmacu_hv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u32m2_m (mask, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hv_u32 (vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u64m2 (vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hv_u32_m (vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmacu_hv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwmacu_hv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmacu\\.hv" 4 } } */ \ No newline at end of file +/* +** test_vwmacu_hv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmacu_hv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hv_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint64m2_t +test_vwmacu_hv_u32 (vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hv_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmacu_hv_u32_m (vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c index 211491ace18f..8328a9c9d3c9 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmacu_hx_u16 (vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwmacu_hx_u16_m (vbool16_t mask, vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u32m2_m (mask, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hx_u32 (vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u64m2 (vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hx_u32_m (vbool32_t mask, vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmacu_hx_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwmacu_hx_u16 (vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmacu\\.hx" 4 } } */ \ No newline at end of file +/* +** test_vwmacu_hx_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmacu_hx_u16_m (vbool16_t mask, vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hx_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint64m2_t +test_vwmacu_hx_u32 (vuint64m2_t vd, int vs1, 
vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hx_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmacu_hx_u32_m (vbool32_t mask, vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c index 39fd9166d20b..45aeba430bc4 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmul_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwmul_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwmul_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwmul_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmul_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwmul_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i32m2 (vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmul\\.hv" 4 } } */ \ No newline at end of file +/* +** test_vwmul_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmul_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmul_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwmul_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwmul_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t 
+test_vwmul_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmul_hv_i64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c
index 6b7d148992bd..230ff2e877b1 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwmul_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmul_hx_i32m2 (vs2, vs1, vl); }
-vint32m2_t test_vwmul_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmul_hx_i32m2_m (mask, vs2, vs1, vl); }
-vint64m2_t test_vwmul_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmul_hx_i64m2 (vs2, vs1, vl); }
-vint64m2_t test_vwmul_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmul_hx_i64m2_m (mask, vs2, vs1, vl); }
+/*
+** test_vwmul_hx_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m2_t
+test_vwmul_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmul_hx_i32m2 (vs2, vs1, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwmul\\.hx" 4 } } */
\ No newline at end of file
+/*
+** test_vwmul_hx_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwmul_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmul_hx_i32m2_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vwmul_hx_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint64m2_t
+test_vwmul_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmul_hx_i64m2 (vs2, vs1, vl);
+}
+
+/*
+** test_vwmul_hx_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwmul_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmul_hx_i64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c
index e4e56640f2e8..bd4efef73f06 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hv_i32m2 (vs2, vs1, vl); }
-vint32m2_t test_vwmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hv_i32m2_m (mask, vs2, vs1, vl); }
-vint64m2_t test_vwmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hv_i64m2 (vs2, vs1, vl); }
-vint64m2_t test_vwmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hv_i64m2_m (mask, vs2, vs1, vl); }
+/*
+** test_vwmulf_hv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hv_i32m2 (vs2, vs1, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwmulf\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwmulf_hv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hv_i32m2_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vwmulf_hv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hv_i64m2 (vs2, vs1, vl);
+}
+
+/*
+** test_vwmulf_hv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hv_i64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c
index b365f11a5db5..bf504ab2ec17 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hx_i32m2 (vs2, vs1, vl); }
-vint32m2_t test_vwmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hx_i32m2_m (mask, vs2, vs1, vl); }
-vint64m2_t test_vwmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hx_i64m2 (vs2, vs1, vl); }
-vint64m2_t test_vwmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulf_hx_i64m2_m (mask, vs2, vs1, vl); }
+/*
+** test_vwmulf_hx_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint32m2_t
+test_vwmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hx_i32m2 (vs2, vs1, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwmulf\\.hx" 4 } } */
\ No newline at end of file
+/*
+** test_vwmulf_hx_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hx_i32m2_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vwmulf_hx_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vint64m2_t
+test_vwmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hx_i64m2 (vs2, vs1, vl);
+}
+
+/*
+** test_vwmulf_hx_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulf_hx_i64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c
index 05afa6b2933c..c359c9a2da10 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m2_t test_vwmulu_hv_u16 (vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hv_u32m2 (vs2, vs1, vl); }
-vuint32m2_t test_vwmulu_hv_u16_m (vbool16_t mask, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hv_u32m2_m (mask, vs2, vs1, vl); }
-vuint64m2_t test_vwmulu_hv_u32 (vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hv_u64m2 (vs2, vs1, vl); }
-vuint64m2_t test_vwmulu_hv_u32_m (vbool32_t mask, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hv_u64m2_m (mask, vs2, vs1, vl); }
+/*
+** test_vwmulu_hv_u16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vuint32m2_t
+test_vwmulu_hv_u16 (vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hv_u32m2 (vs2, vs1, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwmulu\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwmulu_hv_u16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vuint32m2_t
+test_vwmulu_hv_u16_m (vbool16_t mask, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hv_u32m2_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vwmulu_hv_u32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vuint64m2_t
+test_vwmulu_hv_u32 (vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hv_u64m2 (vs2, vs1, vl);
+}
+
+/*
+** test_vwmulu_hv_u32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vuint64m2_t
+test_vwmulu_hv_u32_m (vbool32_t mask, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hv_u64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c
index 5f61f1b5e392..353c30df1278 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m2_t test_vwmulu_hx_u16 (vuint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hx_u32m2 (vs2, vs1, vl); }
-vuint32m2_t test_vwmulu_hx_u16_m (vbool16_t mask, vuint8mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hx_u32m2_m (mask, vs2, vs1, vl); }
-vuint64m2_t test_vwmulu_hx_u32 (vuint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hx_u64m2 (vs2, vs1, vl); }
-vuint64m2_t test_vwmulu_hx_u32_m (vbool32_t mask, vuint16mf2_t vs2, int vs1, size_t vl) {
-  return __riscv_arcv_vwmulu_hx_u64m2_m (mask, vs2, vs1, vl); }
+/*
+** test_vwmulu_hx_u16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vuint32m2_t
+test_vwmulu_hx_u16 (vuint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hx_u32m2 (vs2, vs1, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwmulu\\.hx" 4 } } */
\ No newline at end of file
+/*
+** test_vwmulu_hx_u16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vuint32m2_t
+test_vwmulu_hx_u16_m (vbool16_t mask, vuint8mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hx_u32m2_m (mask, vs2, vs1, vl);
+}
+
+/*
+** test_vwmulu_hx_u32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+
+** ret
+*/
+vuint64m2_t
+test_vwmulu_hx_u32 (vuint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hx_u64m2 (vs2, vs1, vl);
+}
+
+/*
+** test_vwmulu_hx_u32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t
+** ret
+*/
+vuint64m2_t
+test_vwmulu_hx_u32_m (vbool32_t mask, vuint16mf2_t vs2, int vs1, size_t vl)
+{
+  return __riscv_arcv_vwmulu_hx_u64m2_m (mask, vs2, vs1, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c
index d4f318ad6fa4..c970ddaf2bb3 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m1_t test_vwrdot_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_hv_i16m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vwrdot_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdot_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_hv_i32m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdot_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vwrdot\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwrdot_hv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vwrdot_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_hv_i16m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_hv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vwrdot_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_hv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vwrdot_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_hv_i32m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_hv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vwrdot_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c
index 49a056d0aff7..f172547b5eea 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c
@@ -1,21 +1,75 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vwrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl); }
-vint16m1_t test_vwrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); }
-vint32m1_t test_vwrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vwrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vwrdot\\.vv" 6 } } */
\ No newline at end of file
+
+/*
+** test_vwrdot_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vwrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vwrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vwrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vwrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vwrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdot_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vwrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c
index 28d2bca07648..c7521c0a975c 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c
@@ -1,21 +1,75 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m1_t test_vwrdotsu_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1 (vd, vs1, vs2, vl); }
-vint16m1_t test_vwrdotsu_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); }
-vint32m1_t test_vwrdotsu_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1 (vd, vs1, vs2, vl); }
-vint32m1_t test_vwrdotsu_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdotsu_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1 (vd, vs1, vs2, vl); }
-vint64m1_t test_vwrdotsu_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vwrdotsu\\.vv" 6 } } */
\ No newline at end of file
+
+/*
+** test_vwrdotsu_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint16m1_t test_vwrdotsu_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotsu_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint16m1_t test_vwrdotsu_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotsu_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint32m1_t test_vwrdotsu_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotsu_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint32m1_t test_vwrdotsu_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotsu_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vint64m1_t test_vwrdotsu_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotsu_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vint64m1_t test_vwrdotsu_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c
index 703fda13b731..5ecbd055faf8 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c
@@ -1,17 +1,53 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint32m1_t test_vwrdotu_hv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_hv_u16m1_u32m1 (vd, vs1, vs2, vl); }
-vuint32m1_t test_vwrdotu_hv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_hv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl); }
-vuint64m1_t test_vwrdotu_hv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_hv_u32m1_u64m1 (vd, vs1, vs2, vl); }
-vuint64m1_t test_vwrdotu_hv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_hv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl); }
-/* { dg-final { scan-assembler-times "arcv\\.vwrdotu\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwrdotu_hv_u16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m1_t test_vwrdotu_hv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_hv_u16m1_u32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_hv_u16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m1_t test_vwrdotu_hv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_hv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_hv_u32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint64m1_t test_vwrdotu_hv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_hv_u32m1_u64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_hv_u32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint64m1_t test_vwrdotu_hv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_hv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c
index 00f8fe65db18..0c48930f760d 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c
@@ -1,21 +1,75 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vuint16m1_t test_vwrdotu_vv_u8 (vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u8m1_u16m1 (vd, vs1, vs2, vl); }
-vuint16m1_t test_vwrdotu_vv_u8_m (vbool8_t mask, vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u8m1_u16m1_m (mask, vd, vs1, vs2, vl); }
-vuint32m1_t test_vwrdotu_vv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u16m1_u32m1 (vd, vs1, vs2, vl); }
-vuint32m1_t test_vwrdotu_vv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl); }
-vuint64m1_t test_vwrdotu_vv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u32m1_u64m1 (vd, vs1, vs2, vl); }
-vuint64m1_t test_vwrdotu_vv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwrdotu_vv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl); }
-
-/* { dg-final { scan-assembler-times "arcv\\.vwrdotu\\.vv" 6 } } */
\ No newline at end of file
+
+/*
+** test_vwrdotu_vv_u8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint16m1_t test_vwrdotu_vv_u8 (vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u8m1_u16m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_vv_u8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint16m1_t test_vwrdotu_vv_u8_m (vbool8_t mask, vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u8m1_u16m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_vv_u16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint32m1_t test_vwrdotu_vv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u16m1_u32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_vv_u16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint32m1_t test_vwrdotu_vv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_vv_u32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])
+** ret
+*/
+vuint64m1_t test_vwrdotu_vv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u32m1_u64m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwrdotu_vv_u32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t
+** ret
+*/
+vuint64m1_t test_vwrdotu_vv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwrdotu_vv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c
index 447296897852..66f3818b5671 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c
@@ -1,21 +1,80 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m2_t test_vwsmac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i16m2 (vd, vs1, vs2, vl); }
-vint16m2_t test_vwsmac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i16m2_m (mask, vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwsmac_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint16m2_t
+test_vwsmac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i16m2 (vd, vs1, vs2, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwsmac\\.vv" 6 } } */
\ No newline at end of file
+/*
+** test_vwsmac_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint16m2_t
+test_vwsmac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i16m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwsmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i32m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwsmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i32m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwsmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i64m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwsmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vv_i64m2_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c
index c041d9920f62..4edea20e7a45 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c
@@ -1,21 +1,80 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m2_t test_vwsmac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i16m2 (vd, vs1, vs2, vl); }
-vint16m2_t test_vwsmac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i16m2_m (mask, vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwsmac_vx_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint16m2_t
+test_vwsmac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i16m2 (vd, vs1, vs2, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwsmac\\.vx" 6 } } */
\ No newline at end of file
+/*
+** test_vwsmac_vx_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint16m2_t
+test_vwsmac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i16m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vx_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwsmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i32m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vx_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwsmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i32m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vx_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwsmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i64m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmac_vx_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwsmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmac_vx_i64m2_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c
index 107789fa8615..4d3b5eae3bd8 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwsmacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hv_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hv_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwsmacf_hv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwsmacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hv_i32m2 (vd, vs1, vs2, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwsmacf\\.hv" 4 } } */
\ No newline at end of file
+/*
+** test_vwsmacf_hv_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwsmacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hv_i32m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmacf_hv_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwsmacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hv_i64m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmacf_hv_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwsmacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hv_i64m2_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c
index 94bf03646a56..4213190e2bbd 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c
@@ -1,17 +1,56 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint32m2_t test_vwsmacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hx_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwsmacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hx_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwsmacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_arcv_vwsmacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwsmacf_hx_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwsmacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hx_i32m2 (vd, vs1, vs2, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwsmacf\\.hx" 4 } } */
\ No newline at end of file
+/*
+** test_vwsmacf_hx_i16_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint32m2_t
+test_vwsmacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hx_i32m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmacf_hx_i32:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint64m2_t
+test_vwsmacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hx_i64m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsmacf_hx_i32_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au]
+** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint64m2_t
+test_vwsmacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsmacf_hx_i64m2_m (mask, vd, vs1, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c
index a0a2d7dadfba..1183ee34f957 100644
--- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c
@@ -1,21 +1,80 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target arcv_vdsp } */
-/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 #include <riscv_vector.h>
 
-vint16m2_t test_vwsnmsac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i16m2 (vd, vs1, vs2, vl); }
-vint16m2_t test_vwsnmsac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i16m2_m (mask, vd, vs1, vs2, vl); }
-vint32m2_t test_vwsnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i32m2 (vd, vs1, vs2, vl); }
-vint32m2_t test_vwsnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); }
-vint64m2_t test_vwsnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i64m2 (vd, vs1, vs2, vl); }
-vint64m2_t test_vwsnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_arcv_vwsnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); }
+/*
+** test_vwsnmsac_vv_i8:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint16m2_t
+test_vwsnmsac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsnmsac_vv_i16m2 (vd, vs1, vs2, vl);
+}
-/* { dg-final { scan-assembler-times "arcv\\.vwsnmsac\\.vv" 6 } } */
\ No newline at end of file
+/*
+** test_vwsnmsac_vv_i8_m:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au]
+** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t
+** ret
+*/
+vint16m2_t
+test_vwsnmsac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsnmsac_vv_i16m2_m (mask, vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsnmsac_vv_i16:
+** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au]
+** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])
+** ret
+*/
+vint32m2_t
+test_vwsnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl)
+{
+  return __riscv_arcv_vwsnmsac_vv_i32m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_vwsnmsac_vv_i16_m:
+**
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c index fd6fd4ec3f0a..443d9fd0a678 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c @@ -1,21 +1,80 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsnmsac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i16m2 (vd, vs1, vs2, vl); } -vint16m2_t test_vwsnmsac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i16m2_m (mask, vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsnmsac_vx_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint16m2_t +test_vwsnmsac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i16m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsac\\.vx" 6 } } */ \ No newline at end of file +/* +** test_vwsnmsac_vx_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsnmsac_vx_i8_m 
(vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c index 7c43087a707e..43dd8a51e50a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsnmsacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsnmsacf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsnmsacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsacf\\.hv" 4 } } */ \ No newline at end of file +/* +** test_vwsnmsacf_hv_i16_m: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c index 1b4f788e5601..8556781039ec 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsnmsacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsnmsacf_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsnmsacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i32m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsacf\\.hx" 4 } } */ \ No newline at end of file +/* +** test_vwsnmsacf_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c index a949cad8b352..fe26a2d82426 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c @@ -10,9 +10,9 @@ /* ** test_vwsra_vi_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[0-9]+ +** ret */ vint16m2_t test_vwsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) @@ -23,8 +23,8 @@ test_vwsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) /* ** test_vwsra_vi_i8_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m2_t @@ -36,8 +36,8 @@ test_vwsra_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) /* ** test_vwsra_vi_i16: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m2_t @@ -49,8 +49,8 @@ test_vwsra_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) /* ** test_vwsra_vi_i16_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m2_t @@ -62,8 +62,8 @@ test_vwsra_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) /* ** test_vwsra_vi_i32: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint64m2_t @@ -75,8 +75,8 @@ test_vwsra_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) /* ** test_vwsra_vi_i32_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint64m2_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c index df28148a2609..80200a241c6d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c @@ -10,9 +10,9 @@ /* ** test_vwsra_vv_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret */ vint16m2_t test_vwsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) @@ -23,8 +23,8 @@ test_vwsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) /* ** test_vwsra_vv_i8_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m2_t @@ -36,8 +36,8 @@ test_vwsra_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) /* ** test_vwsra_vv_i16: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m2_t @@ -49,8 +49,8 @@ test_vwsra_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) /* ** test_vwsra_vv_i16_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m2_t @@ -62,8 +62,8 @@ test_vwsra_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) /* ** test_vwsra_vv_i32: ** 
csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint64m2_t @@ -75,8 +75,8 @@ test_vwsra_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) /* ** test_vwsra_vv_i32_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint64m2_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c index 76f18a87f33a..a4d90a711528 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c @@ -10,9 +10,9 @@ /* ** test_vwsra_vx_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret */ vint16m2_t test_vwsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) @@ -23,8 +23,8 @@ test_vwsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) /* ** test_vwsra_vx_i8_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint16m2_t @@ -36,8 +36,8 @@ test_vwsra_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) /* ** test_vwsra_vx_i16: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint32m2_t @@ -49,8 +49,8 @@ test_vwsra_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) /* ** test_vwsra_vx_i16_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint32m2_t 
@@ -62,8 +62,8 @@ test_vwsra_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) /* ** test_vwsra_vx_i32: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ ** ret */ vint64m2_t @@ -75,8 +75,8 @@ test_vwsra_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) /* ** test_vwsra_vx_i32_m: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** (?:vmv[0-9]*r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])\n\s+)+arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t ** ret */ vint64m2_t diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c index fa71a2111c14..9323b17c5f7f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c @@ -1,21 +1,75 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vwsrdot_2s_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } -vint16m1_t test_vwsrdot_2s_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } -vint32m1_t test_vwsrdot_2s_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } -vint32m1_t test_vwsrdot_2s_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdot_2s_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdot_2s_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsrdot\\.2s\\.vv" 6 } } */ \ No newline at end of file + +/* +** test_vwsrdot_2s_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint16m1_t test_vwsrdot_2s_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_2s_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** 
arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint16m1_t test_vwsrdot_2s_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_2s_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwsrdot_2s_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_2s_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwsrdot_2s_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_2s_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwsrdot_2s_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_2s_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwsrdot_2s_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c index fc6b809eca84..6054a8021683 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c @@ -1,21 +1,75 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vwsrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } -vint16m1_t test_vwsrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } -vint32m1_t test_vwsrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } -vint32m1_t test_vwsrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return 
__riscv_arcv_vwsrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsrdot\\.vv" 6 } } */ \ No newline at end of file + +/* +** test_vwsrdot_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint16m1_t test_vwsrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint16m1_t test_vwsrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwsrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwsrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwsrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwsrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c index 1c4ae35e638e..642ef6ecaafb 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c @@ -1,17 +1,53 @@ /* { dg-do compile } */ /* { 
dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m1_t test_vwsrdotf_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1 (vd, vs1, vs2, vl); } -vint32m1_t test_vwsrdotf_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdotf_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1 (vd, vs1, vs2, vl); } -vint64m1_t test_vwsrdotf_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwsrdotf\\.hv" 4 } } */ \ No newline at end of file +/* +** test_vwsrdotf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint32m1_t test_vwsrdotf_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdotf_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint32m1_t test_vwsrdotf_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsrdotf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ +vint64m1_t test_vwsrdotf_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1 (vd, vs1, vs2, vl); +} + +/* +** test_vwsrdotf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ +vint64m1_t test_vwsrdotf_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c index 4b63b657d144..d3695f423d41 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vsad } */ -/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ +/* { dg-final { 
check-function-bodies "**" "" } } */ #include #include -vuint16m2_t test_vwsad_vv_i8 (vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u16m2 (vd, vs1, vs2, vl); } -vuint16m2_t test_vwsad_vv_i8_m (vbool8_t mask, vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u16m2_m (mask, vd, vs1, vs2, vl); } -vuint32m2_t test_vwsad_vv_i16 (vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwsad_vv_i16_m (vbool16_t mask, vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u32m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsad_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint16m2_t +test_vwsad_vv_i8 (vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u16m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsad\\.vv" 4 } } */ +/* +** test_vwsad_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint16m2_t +test_vwsad_vv_i8_m (vbool8_t mask, vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsad_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwsad_vv_i16 (vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsad_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwsad_vv_i16_m (vbool16_t mask, vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u32m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c index 6a945ab9b1f8..593f46f7745b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vsad } */ -/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint16m2_t test_vwsadu_vv_u8 (vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u16m2 (vd, vs1, vs2, vl); } -vuint16m2_t test_vwsadu_vv_u8_m (vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u16m2_m (mask, vd, vs1, vs2, vl); } -vuint32m2_t test_vwsadu_vv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t 
test_vwsadu_vv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u32m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsadu_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint16m2_t +test_vwsadu_vv_u8 (vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u16m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsadu\\.vv" 4 } } */ +/* +** test_vwsadu_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint16m2_t +test_vwsadu_vv_u8_m (vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsadu_vv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwsadu_vv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsadu_vv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwsadu_vv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u32m2_m (mask, vd, vs1, vs2, vl); +} From 5210e6cbc3954bb87a7e69fa0dd713df5ac5c9cb Mon Sep 17 00:00:00 2001 From: Michiel Derhaeg Date: Thu, 9 Oct 2025 13:38:11 +0200 Subject: [PATCH 2/3] arcv: Prevent register overlap for widening/narrowing XARCV instructions Not all instructions had to be updated. - The spec explicitly adds an exception for dot-product instructions. - The spec explicitly adds an exception for redsum instructions. - Ternary operations can't overlap as vs(1|2) will always have a different type than vd. 
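The mechanism used here is GCC's earlyclobber constraint ('&', e.g. "=vd"
becoming "=&vd") on the destination of each affected pattern: it tells the
register allocator that vd is written before the narrower source operands
are fully consumed, so vd must not share a register group with them. This
is the same mechanism the generic RVV widening/narrowing patterns already
rely on.

As a rough illustration only (a scalar C sketch, not taken from the spec
or from this patch; the function and its names are hypothetical), overlap
is unsafe because a wider destination element overwrites source elements
that have not been read yet:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical scalar model of a widening shift such as
       arcv.vwsra.  If OUT aliases IN -- the overlap the '&'
       constraints now rule out -- writing the 16-bit element OUT[1]
       clobbers the 8-bit elements IN[2] and IN[3] before iterations
       2 and 3 read them.  */
    static void
    widen_sra (int16_t *out, const int8_t *in, unsigned shift, size_t n)
    {
      for (size_t i = 0; i < n; i++)
        out[i] = (int16_t) (in[i] >> shift); /* sign-extend (integer
                                                promotion), then shift */
    }
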
Signed-off-by: Michiel Derhaeg --- gcc/config/riscv/arcv-vector.md | 92 ++++++++++++++++----------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index 3420bff3d3e9..dadadf81b869 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -451,7 +451,7 @@ (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -465,7 +465,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VWEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA)) @@ -476,7 +476,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -490,7 +490,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VQEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA)) @@ -501,7 +501,7 @@ (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -515,7 +515,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -525,7 +525,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -539,7 +539,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -549,7 +549,7 @@ (set_attr 
"mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_s" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -563,7 +563,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VWEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA_S)) @@ -574,7 +574,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_s" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -588,7 +588,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VQEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA_S)) @@ -599,7 +599,7 @@ (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_s_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -613,7 +613,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA_S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -623,7 +623,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_s_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -637,7 +637,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA_S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -647,7 +647,7 @@ (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_2s" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ 
-661,7 +661,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VWEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA_2S)) @@ -672,7 +672,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_2s" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -686,7 +686,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (sign_extend:VQEXTI (match_operand: 4 "vector_shift_operand" "0,0,0,0,vr,vr,vr,vr,vk,vk,vk,vk"))] UNSPEC_ARCV_VNSRA_2S)) @@ -697,7 +697,7 @@ (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_2s_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -711,7 +711,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI - [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA_2S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -721,7 +721,7 @@ (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_2s_scalar" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand: 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -735,7 +735,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI - [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") + [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] UNSPEC_ARCV_VNSRA_2S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -745,7 +745,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsra" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -770,7 +770,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsra_scalar" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 
"vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1197,7 +1197,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1209,7 +1209,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (sign_extend:VQEXTI (match_operand: 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr"))] UNSPEC_ARCV_VWMUL) @@ -1220,7 +1220,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul_scalar" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1232,7 +1232,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (match_operand: 4 "register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] UNSPEC_ARCV_VWMUL) (match_operand:VQEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -1289,7 +1289,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1301,7 +1301,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (sign_extend:VQEXTI (match_operand: 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr"))] UNSPEC_ARCV_VWMULU) @@ -1312,7 +1312,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu_scalar" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1324,7 +1324,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (match_operand: 4 "register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] UNSPEC_ARCV_VWMULU) (match_operand:VQEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -1381,7 +1381,7 @@ (set_attr "mode" "")]) (define_insn "@pred_half_arcv_vsmulf" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 
"register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1395,7 +1395,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (match_operand:VWEXTI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] UNSPEC_ARCV_VSMULF) (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -1405,7 +1405,7 @@ (set_attr "mode" "")]) (define_insn "@pred_half_arcv_vsmulf_scalar" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1419,7 +1419,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (match_operand: 4 "register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] UNSPEC_ARCV_VSMULF) (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -1429,7 +1429,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulf" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1441,7 +1441,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (sign_extend:VQEXTI (match_operand: 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr"))] UNSPEC_ARCV_VWMULF) @@ -1452,7 +1452,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulf_scalar" - [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VQEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VQEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1464,7 +1464,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:VQEXTI [(sign_extend:VQEXTI - (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) + (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) (match_operand: 4 "register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] UNSPEC_ARCV_VWMULF) (match_operand:VQEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] @@ -1866,7 +1866,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmul" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1889,7 +1889,7 @@ (set_attr "mode" 
"")]) (define_insn "@pred_widen_arcv_vwscmul_scalar" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1911,7 +1911,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") @@ -1934,7 +1934,7 @@ (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul_scalar" - [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") + [(set (match_operand:VWEXTI 0 "register_operand" "=&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr,&vd,&vd,&vr,&vr") (if_then_else:VWEXTI (unspec: [(match_operand: 1 "vector_mask_operand" "vm, vm,Wc1, Wc1, vm, vm,Wc1,Wc1, vm, vm,Wc1,Wc1") From 8e46f982bdd7bfbd36720a8d9216925e00389189 Mon Sep 17 00:00:00 2001 From: Michiel Derhaeg Date: Fri, 17 Oct 2025 14:33:05 +0200 Subject: [PATCH 3/3] arcv: Use correct scalar operand description for shift XARCV instructions Before was used for the operand mode. This would produce extra incorrect instructions to be emitted when VEL was 64-bit. Signed-off-by: Michiel Derhaeg --- gcc/config/riscv/arcv-vector.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index dadadf81b869..0608df50cbba 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -308,7 +308,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VSRA) (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -350,7 +350,7 @@ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VSRAT) (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -396,7 +396,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VSRA_S) (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -442,7 +442,7 @@ (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VSRA_2S) (match_operand:V_VLSI 2 "vector_merge_operand" 
"vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -516,7 +516,7 @@ (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -540,7 +540,7 @@ (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -614,7 +614,7 @@ (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA_S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -638,7 +638,7 @@ (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA_S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -712,7 +712,7 @@ (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA_2S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -736,7 +736,7 @@ (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VNSRA_2S)) (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" @@ -785,7 +785,7 @@ (unspec:VWEXTI [(sign_extend:VWEXTI (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) - (match_operand:SI 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] + (match_operand 4 "pmode_reg_or_uimm5_operand" "r,r,r,r,r,r,K,K,K,K,K,K")] UNSPEC_ARCV_VWSRA) (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP"