diff --git a/Makefile b/Makefile
index 41c8324b..40163d05 100644
--- a/Makefile
+++ b/Makefile
@@ -142,6 +142,16 @@ $(call set-feature, EXT_C)
 ENABLE_RV32E ?= 0
 $(call set-feature, RV32E)
 
+# Vector extension instructions
+ENABLE_EXT_V ?= 0
+$(call set-feature, EXT_V)
+# Default VLEN is 128
+VLEN ?= 128
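+# Example: make ENABLE_EXT_V=1 VLEN=256 builds with the vector extension enabled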
+ifeq ($(call has, EXT_V), 1)
+CFLAGS += -DVLEN=$(VLEN)
+ENABLE_EXT_F ?= 1
+$(call set-feature, EXT_F)
+endif
+
 # Control and Status Register (CSR)
 ENABLE_Zicsr ?= 1
 $(call set-feature, Zicsr)
diff --git a/src/decode.c b/src/decode.c
index f65556da..2b02ad30 100644
--- a/src/decode.c
+++ b/src/decode.c
@@ -306,6 +306,87 @@ static inline uint16_t c_decode_cbtype_imm(const uint16_t insn)
 }
 #endif /* RV32_HAS(EXT_C) */
 
+#if RV32_HAS(EXT_V) /* Vector extension */
+/* Sign-extended vector immediate */
+static inline int32_t decode_v_imm(const uint32_t insn)
+{
+    return ((int32_t) ((insn << 12) & FR4_RS3)) >> 27;
+}
+
+/* decode vsetvli zimm[10:0] field
+ * zimm = inst[30:20]
+ */
+static inline uint32_t decode_vsetvli_zimm(const uint32_t insn)
+{
+    return (insn & FV_ZIMM_30_20) >> 20;
+}
+
+/* decode vsetivli zimm[9:0] field
+ * zimm = inst[29:20]
+ */
+static inline uint32_t decode_vsetivli_zimm(const uint32_t insn)
+{
+    return (insn & FV_ZIMM_29_20) >> 20;
+}
+
+/* decode insn[30]
+ * distinguishes vsetvl (0) from vsetivli (1) once insn[31] is known to be set
+ */
+static inline uint8_t decode_31(const uint32_t insn)
+{
+    return (insn & 0x40000000) >> 30;
+}
+
+/* decode vector mask field
+ * vm = insn[25]
+ */
+static inline uint8_t decode_vm(const uint32_t insn)
+{
+    return (insn & FV_VM) >> 25;
+}
+
+/* decode mop field
+ * mop = insn[27:26]
+ */
+static inline uint8_t decode_mop(const uint32_t insn)
+{
+    return (insn & FV_MOP) >> 26;
+}
+
+/* decode eew(width) field
+ * eew(width) = insn[14:12]
+ */
+static inline uint8_t decode_eew(const uint32_t insn)
+{
+    switch ((insn & FV_14_12) >> 12) {
+    case 0b000:
+        return 0;
+    case 0b101:
+        return 1;
+    case 0b110:
+        return 2;
+    case 0b111:
+        return 3;
+    default:
+        __UNREACHABLE;
+        break;
+    }
+}
+
+/* decode nf field
+ * nf = insn[31:29]
+ */
+static inline uint8_t decode_nf(const uint32_t insn)
+{
+    return (insn & FV_NF) >> 29;
+}
+
+/* decode lumop/sumop field
+ * lumop/sumop = insn[24:20]
+ */
+static inline uint8_t decode_24_20(const uint32_t insn)
+{
+    return (insn & FV_24_20) >> 20;
+}
+#endif /* Vector extension */
+
 /* decode I-type
  *  31       20 19   15 14    12 11   7 6      0
  * | imm[11:0] |  rs1  | funct3 |  rd  | opcode |
@@ -385,6 +466,78 @@ static inline void decode_r4type(rv_insn_t *ir, const uint32_t insn)
 }
 #endif
 
+#if RV32_HAS(EXT_V)
+/* decode VL* unit-stride
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11 7 6       0
+ * |  nf  | mew | mop | vm | lumop |  rs1  | width | vd | 0000111 |
+ */
+static inline void decode_VL(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->rs1 = decode_rs1(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+
+/* decode VLS* strided
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11 7 6       0
+ * |  nf  | mew | mop | vm |  rs2  |  rs1  | width | vd | 0000111 |
+ */
+static inline void decode_VLS(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->rs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+
+/* decode VLX* indexed
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11 7 6       0
+ * |  nf  | mew | mop | vm |  vs2  |  rs1  | width | vd | 0000111 |
+ */
+static inline void decode_VLX(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+
+/* decode VS* unit-stride
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11  7 6       0
+ * |  nf  | mew | mop | vm | sumop |  rs1  | width | vs3 | 0100111 |
+ */
+static inline void decode_VS(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->rs1 = decode_rs1(insn);
+    ir->vs3 = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+
+/* decode VSS* strided
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11  7 6       0
+ * |  nf  | mew | mop | vm |  rs2  |  rs1  | width | vs3 | 0100111 |
+ */
+static inline void decode_VSS(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->rs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->vs3 = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+
+/* decode VSX* indexed
+ *  31  29  28   27 26  25  24   20 19   15 14   12 11  7 6       0
+ * |  nf  | mew | mop | vm |  vs2  |  rs1  | width | vs3 | 0100111 |
+ */
+static inline void decode_VSX(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->vs3 = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
+#endif
+
 /* LOAD: I-type
  *  31       20 19   15 14    12 11   7 6      0
  * | imm[11:0] |  rs1  | funct3 |  rd  | opcode |
@@ -1199,6 +1352,145 @@ static inline bool op_amo(rv_insn_t *ir, const uint32_t insn)
  */
 static inline bool op_load_fp(rv_insn_t *ir, const uint32_t insn)
 {
+#if RV32_HAS(EXT_V)
+    /* inst nf mew mop vm   rs2/vs2  rs1   width vd  opcode
+     * ----+---+---+---+--+---------+-----+-----+---+--------
+     * VL*   nf mew mop vm    lumop  rs1   width vd  0000111
+     * VLS*  nf mew mop vm    rs2    rs1   width vd  0000111
+     * VLX*  nf mew mop vm    vs2    rs1   width vd  0000111
+     *
+     * There are 177 vector load instructions under the opcode 0000111. These
+     * instructions follow the naming pattern vlxxx<nf>e<i><eew><ff>.v, which
+     * can be decoded from mop, lumop, nf, and eew. Rather than switching on
+     * every field, this implementation leverages the enum ordering in
+     * RV_INSN_LIST to compute each instruction's relative offset from nf and
+     * eew. The vector load instructions for eew = 64 are included.
+     *
+     * vlxxx<nf>e<i><eew><ff>.v
+     *  └── mop
+     *      ├── 00
+     *      │   └── lumop
+     *      │       ├── 00000
+     *      │       │   └── nf
+     *      │       │       ├── 000
+     *      │       │       │   └── vle<eew>.v
+     *      │       │       ├── 001
+     *      │       │       │   └── vlseg<2>e<eew>.v
+     *      │       │       ├── ...
+     *      │       │       └── 111
+     *      │       │           └── vlseg<8>e<eew>.v
+     *      │       ├── 01000
+     *      │       │   └── nf
+     *      │       │       ├── 000
+     *      │       │       │   └── vl1r<eew>.v
+     *      │       │       ├── 001
+     *      │       │       │   └── vl2r<eew>.v
+     *      │       │       ├── 011
+     *      │       │       │   └── vl4r<eew>.v
+     *      │       │       └── 111
+     *      │       │           └── vl8r<eew>.v
+     *      │       ├── 01011
+     *      │       │   └── vlm.v
+     *      │       └── 10000
+     *      │           ├── 000
+     *      │           │   └── vle<eew>ff.v
+     *      │           ├── 001
+     *      │           │   └── vlseg<2>e<eew>ff.v
+     *      │           ├── ...
+     *      │           └── 111
+     *      │               └── vlseg<8>e<eew>ff.v
+     *      ├── 01
+     *      │   └── nf
+     *      │       ├── 000
+     *      │       │   └── vluxei<eew>.v
+     *      │       ├── 001
+     *      │       │   └── vluxseg<2>ei<eew>.v
+     *      │       ├── ...
+     *      │       └── 111
+     *      │           └── vluxseg<8>ei<eew>.v
+     *      ├── 10
+     *      │   └── nf
+     *      │       ├── 000
+     *      │       │   └── vlse<eew>.v
+     *      │       ├── 001
+     *      │       │   └── vlsseg<2>e<eew>.v
+     *      │       ├── ...
+     *      │       └── 111
+     *      │           └── vlsseg<8>e<eew>.v
+     *      └── 11
+     *          └── nf
+     *              ├── 000
+     *              │   └── vloxei<eew>.v
+     *              ├── 001
+     *              │   └── vloxseg<2>ei<eew>.v
+     *              ├── ...
+     *              └── 111
+     *                  └── vloxseg<8>ei<eew>.v
+     */
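+    /* Worked offset example, assuming RV_INSN_LIST lists each segment-load
+     * family as seven consecutive entries per eew (the layout the arithmetic
+     * below relies on): vlseg3e16.v has mop = 00, lumop = 00000, width = 101
+     * (eew index 1) and nf = 010, so it maps to
+     * rv_insn_vlseg2e8_v + 7 * 1 + 2 - 1.
+     */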
+    if (decode_funct3(insn) != 0b010) {
+        uint8_t eew = decode_eew(insn);
+        ir->eew = 8 << eew;
+        uint8_t nf = decode_nf(insn);
+        switch (decode_mop(insn)) {
+        case 0:
+            decode_VL(ir, insn);
+            /* check lumop */
+            switch (decode_24_20(insn)) {
+            case 0b00000:
+                if (!nf) {
+                    ir->opcode = rv_insn_vle8_v + eew;
+                } else {
+                    ir->opcode = rv_insn_vlseg2e8_v + 7 * eew + nf - 1;
+                }
+                break;
+            case 0b01000:
+                ir->opcode = rv_insn_vl1re8_v + 4 * eew + ilog2(nf + 1);
+                break;
+            case 0b01011:
+                ir->opcode = rv_insn_vlm_v;
+                break;
+            case 0b10000:
+                if (!nf) {
+                    ir->opcode = rv_insn_vle8ff_v + eew;
+                } else {
+                    ir->opcode = rv_insn_vlseg2e8ff_v + 7 * eew + nf - 1;
+                }
+                break;
+            default:
+                return false;
+            }
+            break;
+        case 1:
+            decode_VLX(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vluxei8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vluxseg2ei8_v + 7 * eew + nf - 1;
+            }
+            break;
+        case 2:
+            decode_VLS(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vlse8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vlsseg2e8_v + 7 * eew + nf - 1;
+            }
+            break;
+        case 3:
+            decode_VLX(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vloxei8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vloxseg2ei8_v + 7 * eew + nf - 1;
+            }
+            break;
+        default:
+            return false;
+        }
+        return true;
+    }
+#endif
+
     /* inst imm[11:0] rs1 width rd opcode
      * ----+---------+---+-----+--+-------
      * FLW  imm[11:0] rs1 010   rd 0000111
@@ -1217,6 +1509,127 @@ static inline bool op_load_fp(rv_insn_t *ir, const uint32_t insn)
  */
 static inline bool op_store_fp(rv_insn_t *ir, const uint32_t insn)
 {
+#if RV32_HAS(EXT_V)
+    /* inst nf mew mop vm   rs2/vs2  rs1   width vs3  opcode
+     * ----+---+---+---+--+---------+-----+-----+---+--------
+     * VS*   nf mew mop vm    sumop  rs1   width vs3  0100111
+     * VSS*  nf mew mop vm    rs2    rs1   width vs3  0100111
+     * VSX*  nf mew mop vm    vs2    rs1   width vs3  0100111
+     *
+     * There are 133 vector store instructions under the opcode 0100111. They
+     * are decoded the same way as the vector load instructions above. The
+     * vector store instructions for eew = 64 are included.
+     *
+     * vsxxx<nf>e<i><eew>.v
+     *  └── mop
+     *      ├── 00
+     *      │   └── sumop
+     *      │       ├── 00000
+     *      │       │   └── nf
+     *      │       │       ├── 000
+     *      │       │       │   └── vse<eew>.v
+     *      │       │       ├── 001
+     *      │       │       │   └── vsseg<2>e<eew>.v
+     *      │       │       ├── ...
+     *      │       │       └── 111
+     *      │       │           └── vsseg<8>e<eew>.v
+     *      │       ├── 01000
+     *      │       │   └── nf
+     *      │       │       ├── 000
+     *      │       │       │   └── vs1r.v
+     *      │       │       ├── 001
+     *      │       │       │   └── vs2r.v
+     *      │       │       ├── 011
+     *      │       │       │   └── vs4r.v
+     *      │       │       └── 111
+     *      │       │           └── vs8r.v
+     *      │       └── 01011
+     *      │           └── vsm.v
+     *      ├── 01
+     *      │   └── nf
+     *      │       ├── 000
+     *      │       │   └── vsuxei<eew>.v
+     *      │       ├── 001
+     *      │       │   └── vsuxseg<2>ei<eew>.v
+     *      │       ├── ...
+     *      │       └── 111
+     *      │           └── vsuxseg<8>ei<eew>.v
+     *      ├── 10
+     *      │   └── nf
+     *      │       ├── 000
+     *      │       │   └── vsse<eew>.v
+     *      │       ├── 001
+     *      │       │   └── vssseg<2>e<eew>.v
+     *      │       ├── ...
+     *      │       └── 111
+     *      │           └── vssseg<8>e<eew>.v
+     *      └── 11
+     *          └── nf
+     *              ├── 000
+     *              │   └── vsoxei<eew>.v
+     *              ├── 001
+     *              │   └── vsoxseg<2>ei<eew>.v
+     *              ├── ...
+     *              └── 111
+     *                  └── vsoxseg<8>ei<eew>.v
+     */
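+    /* Worked offset example, under the same enum-layout assumption as the
+     * load path: vssseg4e32.v has mop = 10, width = 110 (eew index 2) and
+     * nf = 011, so it maps to rv_insn_vssseg2e8_v + 7 * 2 + 3 - 1.
+     */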
+    if (decode_funct3(insn) != 0b010) {
+        uint8_t eew = decode_eew(insn);
+        ir->eew = 8 << eew;
+        uint8_t nf = decode_nf(insn);
+        switch (decode_mop(insn)) {
+        case 0:
+            decode_VS(ir, insn);
+            /* check sumop */
+            switch (decode_24_20(insn)) {
+            case 0b00000:
+                if (!nf) {
+                    ir->opcode = rv_insn_vse8_v + eew;
+                } else {
+                    ir->opcode = rv_insn_vsseg2e8_v + 7 * eew + nf - 1;
+                }
+                break;
+            case 0b01000:
+                ir->opcode = rv_insn_vs1r_v + ilog2(nf + 1);
+                break;
+            case 0b01011:
+                ir->opcode = rv_insn_vsm_v;
+                break;
+            default:
+                return false;
+            }
+            break;
+        case 1:
+            decode_VSX(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vsuxei8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vsuxseg2ei8_v + 7 * eew + nf - 1;
+            }
+            break;
+        case 2:
+            decode_VSS(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vsse8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vssseg2e8_v + 7 * eew + nf - 1;
+            }
+            break;
+        case 3:
+            decode_VSX(ir, insn);
+            if (!nf) {
+                ir->opcode = rv_insn_vsoxei8_v + eew;
+            } else {
+                ir->opcode = rv_insn_vsoxseg2ei8_v + 7 * eew + nf - 1;
+            }
+            break;
+        default:
+            return false;
+        }
+        return true;
+    }
+#endif
+
     /* inst imm[11:5] rs2 rs1 width imm[4:0] opcode
      * ----+---------+---+---+-----+--------+-------
      * FSW  imm[11:5] rs2 rs1 010   imm[4:0] 0100111
@@ -1971,67 +2384,2039 @@ static inline bool op_cfsw(rv_insn_t *ir, const uint32_t insn)
 #define op_cflwsp OP_UNIMP
 #endif /* RV32_HAS(EXT_C) && RV32_HAS(EXT_F) */
 
-/* handler for all unimplemented opcodes */
-static inline bool op_unimp(rv_insn_t *ir UNUSED, uint32_t insn UNUSED)
+#if RV32_HAS(EXT_V) /* Vector extension */
+/* decode vsetvli
+ *  31  30        20 19   15 14  12 11   7 6      0
+ * | 0 | zimm[10:0] |  rs1  | 111  |  rd  | opcode |
+ */
+static inline void decode_vsetvli(rv_insn_t *ir, const uint32_t insn)
 {
-    return false;
+    ir->zimm = decode_vsetvli_zimm(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->rd = decode_rd(insn);
 }
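+
+/* Worked example, assuming the RVV 1.0 vtype layout (vlmul[2:0], vsew[5:3],
+ * vta[6], vma[7]): "vsetvli t0, a0, e32, m1, ta, ma" encodes as 0x0d0572d7,
+ * so decode_vsetvli() yields zimm = 0xd0, rs1 = 10 (a0), rd = 5 (t0).
+ */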
 
-/* RV32 decode handler type */
-typedef bool (*decode_t)(rv_insn_t *ir, uint32_t insn);
+/* decode vsetivli
+ *  31  30  29        20 19   15 14  12 11   7 6      0
+ * | 1 | 1 | zimm[9:0]  |  rs1  | 111  |  rd  | opcode |
+ */
+static inline void decode_vsetivli(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->zimm = decode_vsetivli_zimm(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->rd = decode_rd(insn);
+}
 
-/* decode RISC-V instruction */
-bool rv_decode(rv_insn_t *ir, uint32_t insn)
+/* decode vsetvl
+ *  31  30    25 24  20 19   15 14  12 11   7 6      0
+ * | 1 | 000000 | rs2  |  rs1  | 111  |  rd  | opcode |
+ */
+static inline void decode_vsetvl(rv_insn_t *ir, const uint32_t insn)
 {
-    assert(ir);
+    ir->rs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->rd = decode_rd(insn);
+}
 
-#define OP_UNIMP op_unimp
-#define OP(insn) op_##insn
+/* decode vector-vector operation
+ *  31    26  25  24   20 19   15 14    12 11 7 6       0
+ * | funct6 | vm |  vs2  |  vs1  | funct3 | vd | 1010111 |
+ */
+static inline void decode_vvtype(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->vs1 = decode_rs1(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
 
-    /* RV32 base opcode map */
-    /* clang-format off */
-    static const decode_t rv_jump_table[] = {
-    //  000         001           010        011           100         101        110        111
-        OP(load),   OP(load_fp),  OP(unimp), OP(misc_mem), OP(op_imm), OP(auipc), OP(unimp), OP(unimp), // 00
-        OP(store),  OP(store_fp), OP(unimp), OP(amo),      OP(op),     OP(lui),   OP(unimp), OP(unimp), // 01
-        OP(madd),   OP(msub),     OP(nmsub), OP(nmadd),    OP(op_fp),  OP(unimp), OP(unimp), OP(unimp), // 10
-        OP(branch), OP(jalr),     OP(unimp), OP(jal),      OP(system), OP(unimp), OP(unimp), OP(unimp), // 11
-    };
+/* decode vector-immediate operation
+ *  31    26  25  24   20 19   15 14    12 11 7 6       0
+ * | funct6 | vm |  vs2  |  imm  | funct3 | vd | 1010111 |
+ */
+static inline void decode_vitype(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->imm = decode_v_imm(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
 
-#if RV32_HAS(EXT_C)
-    /* RV32C opcode map */
-    static const decode_t rvc_jump_table[] = {
-    //  00             01             10          11
-        OP(caddi4spn), OP(caddi),     OP(cslli),  OP(unimp),  // 000
-        OP(unimp),      OP(cjal),      OP(unimp), OP(unimp),  // 001
-        OP(clw),       OP(cli),       OP(clwsp),  OP(unimp),  // 010
-        OP(cflw),      OP(clui),      OP(cflwsp), OP(unimp),  // 011
-        OP(unimp),     OP(cmisc_alu), OP(ccr),    OP(unimp),  // 100
-        OP(unimp),      OP(cj),        OP(unimp), OP(unimp),  // 101
-        OP(csw),       OP(cbeqz),     OP(cswsp),  OP(unimp),  // 110
-        OP(cfsw),      OP(cbnez),     OP(cfswsp), OP(unimp),  // 111
-    };
-#endif
-    /* clang-format on */
+/* decode vector-scalar operation
+ *  31    26  25  24   20 19   15 14    12 11 7 6       0
+ * | funct6 | vm |  vs2  |  rs1  | funct3 | vd | 1010111 |
+ */
+static inline void decode_vxtype(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->rs1 = decode_rs1(insn);
+    ir->vd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
 
-    /* Compressed Extension Instruction */
-#if RV32_HAS(EXT_C)
-    /* If the last 2-bit is one of 0b00, 0b01, and 0b10, it is
-     * a 16-bit instruction.
-     */
-    if (is_compressed(insn)) {
-        insn &= 0x0000FFFF;
-        const uint16_t c_index = (insn & FC_FUNC3) >> 11 | (insn & FC_OPCODE);
+/* decode vector mask instructions with single vector operand
+ *  31    26  25  24   20 19   15 14    12 11 7 6       0
+ * | funct6 | vm |  vs2  |  rs1  | funct3 | rd | 1010111 |
+ */
+static inline void decode_mtype(rv_insn_t *ir, const uint32_t insn)
+{
+    ir->vs2 = decode_rs2(insn);
+    ir->rd = decode_rd(insn);
+    ir->vm = decode_vm(insn);
+}
 
-        /* decode instruction (compressed instructions) */
-        const decode_t op = rvc_jump_table[c_index];
-        assert(op);
-        return op(ir, insn);
+static inline bool op_vcfg(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (insn >> 31) {
+    case 0: /* vsetvli */
+        decode_vsetvli(ir, insn);
+        ir->opcode = rv_insn_vsetvli;
+        break;
+    case 1:
+        switch (decode_31(insn)) {
+        case 0: /* vsetvl */
+            decode_vsetvl(ir, insn);
+            ir->opcode = rv_insn_vsetvl;
+            break;
+        case 1: /* vsetivli */
+            decode_vsetivli(ir, insn);
+            ir->opcode = rv_insn_vsetivli;
+            break;
+        default: /* illegal instruction */
+            return false;
+        }
+        break;
+
+    default: /* illegal instruction */
+        return false;
     }
-#endif
+    return true;
+}
 
-    /* standard uncompressed instruction */
-    const uint32_t index = (insn & INSN_6_2) >> 2;
+/*
+ * Vector instructions under opcode 1010111 are decoded using funct6 (bits
+ * 31-26). A new jump table rvv_jump_table is introduced, similar to
+ * rv_jump_table, but indexed by funct6 to determine the specific vector
+ * operation. The naming convention follows op_funct6, where funct6 is directly
+ * appended after op_ (e.g., op_000000).
+ */
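+/* funct3 selects the operand category within each funct6 group:
+ *   0: OPIVV  1: OPFVV  2: OPMVV  3: OPIVI  4: OPIVX  5: OPFVF  6: OPMVX
+ * For example, insn 0x02008157 (vadd.vv v2, v0, v1) has funct6 = 000000 and
+ * funct3 = 0, so it is handled by op_000000() below and decodes to
+ * rv_insn_vadd_vv.
+ */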
+static inline bool op_000000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vadd_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfadd_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredsum_vs;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vadd_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vadd_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfadd_vf;
+        break;
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0: /* Reserved */
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfredusum_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredand_vs;
+        break;
+    case 3:  /* Reserved */
+    case 4:  /* Reserved */
+    case 5:  /* Reserved */
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsub_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfsub_vv;
+        break;
+    case 2:
+    case 3: /* Reserved */
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredor_vs;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsub_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfsub_vf;
+        break;
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0: /* Reserved */
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfredosum_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredxor_vs;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vrsub_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vrsub_vx;
+        break;
+    case 5:  /* Reserved */
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vminu_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmin_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredminu_vs;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vminu_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmin_vf;
+        break;
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmin_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfredmin_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredmin_vs;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmin_vx;
+        break;
+    case 5:  /* Reserved */
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmaxu_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmax_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredmaxu_vs;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmaxu_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmax_vf;
+        break;
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_000111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmax_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfredmax_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vredmax_vs;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmax_vx;
+        break;
+    case 5:  /* Reserved */
+    case 6:  /* Reserved */
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnj_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vaaddu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnj_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vaaddu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vand_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnjn_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vaadd_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vand_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vand_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnjn_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vaadd_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vor_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnjx_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vasubu_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vor_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vor_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfsgnjx_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vasubu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vxor_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vasub_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vxor_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vxor_vx;
+        break;
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vasub_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vrgather_vv;
+        break;
+    case 1:
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vrgather_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vrgather_vx;
+        break;
+    case 5:
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vrgatherei16_vv;
+        break;
+    case 1:
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vslideup_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vslideup_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfslide1up_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vslide1up_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_001111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vslidedown_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vslidedown_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfslide1down_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vslide1down_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->vm = 0;
+        ir->opcode = rv_insn_vadc_vvm;
+        break;
+    case 1:
+        /* FIXME: Implement the decoding for VWFUNARY0. */
+    case 2:
+        /* VWXUNARY0 */
+        switch (decode_rs1(insn)) {
+        case 0:
+            decode_mtype(ir, insn);
+            ir->opcode = rv_insn_vmv_x_s;
+            break;
+        case 0b10000:
+            decode_mtype(ir, insn);
+            ir->opcode = rv_insn_vcpop_m;
+            break;
+        case 0b10001:
+            decode_mtype(ir, insn);
+            ir->opcode = rv_insn_vfirst_m;
+            break;
+        default:
+            return false;
+        }
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vadc_vim;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vadc_vxm;
+        break;
+    case 5:
+        /* FIXME: Implement the decoding for VRFUNARY0. */
+    case 6:
+        /* VRXUNARY0 (vmv.s.x): move x[rs1] into element 0 of the destination
+         * vector register encoded in the rd field
+         */
+        ir->rd = decode_rd(insn);
+        ir->rs1 = decode_rs1(insn);
+        ir->vm = 1;
+        ir->opcode = rv_insn_vmv_s_x;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmadc_vv;
+        break;
+    case 1:
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmadc_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmadc_vx;
+        break;
+    case 5:
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsbc_vvm;
+        break;
+    case 1:
+        /* FIXME: Implement the decoding for VFUNARY0. */
+        break;
+    case 2:
+        /* FIXME: Implement the decoding for VXUNARY0. */
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsbc_vxm;
+        break;
+    case 5:
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmsbc_vv;
+        break;
+    case 1:
+        /* FIXME: Implement the decoding for VFUNARY1. */
+    case 2:
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsbc_vx;
+        break;
+    case 5:
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010100(rv_insn_t *ir, const uint32_t insn)
+{
+    /* VMUNARY0 */
+    switch (decode_rs1(insn)) {
+    case 0b00001:
+        decode_mtype(ir, insn);
+        ir->opcode = rv_insn_vmsbf_m;
+        break;
+    case 0b00010:
+        decode_mtype(ir, insn);
+        ir->opcode = rv_insn_vmsof_m;
+        break;
+    case 0b00011:
+        decode_mtype(ir, insn);
+        ir->opcode = rv_insn_vmsif_m;
+        break;
+    case 0b10000:
+        decode_mtype(ir, insn);
+        ir->opcode = rv_insn_viota_m;
+        break;
+    case 0b10001:
+        ir->vd = decode_rd(insn);
+        ir->vm = 1;
+        ir->opcode = rv_insn_vid_v;
+        break;
+    default:
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_010111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        if (decode_vm(insn)) {
+            ir->vm = 1;
+            ir->opcode = rv_insn_vmv_v_v;
+        } else {
+            ir->vm = 0;
+            ir->opcode = rv_insn_vmerge_vvm;
+        }
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vcompress_vm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        if (decode_vm(insn)) {
+            ir->vm = 1;
+            ir->opcode = rv_insn_vmv_v_i;
+        } else {
+            ir->vm = 0;
+            ir->opcode = rv_insn_vmerge_vim;
+        }
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        if (decode_vm(insn)) {
+            ir->vm = 1;
+            ir->opcode = rv_insn_vmv_v_x;
+        } else {
+            ir->vm = 0;
+            ir->opcode = rv_insn_vmerge_vxm;
+        }
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        if (decode_vm(insn)) {
+            ir->vm = 1;
+            ir->opcode = rv_insn_vfmv_v_f;
+        } else {
+            ir->vm = 0;
+            ir->opcode = rv_insn_vfmerge_vfm;
+        }
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmseq_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmfeq_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmandn_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmseq_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmseq_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmfeq_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmsne_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmfle_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmand_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmsne_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsne_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmfle_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmsltu_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmflt_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmor_mm;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsltu_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmflt_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmslt_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmflt_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmxor_mm;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmslt_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmflt_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmsleu_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmfne_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmorn_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmsleu_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsleu_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmfne_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmsle_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmnand_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmsle_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsle_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmfgt_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmnor_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmsgtu_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsgtu_vx;
+        break;
+    case 5:
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_011111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmxnor_mm;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vmsgt_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmsgt_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmfge_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsaddu_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfdiv_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vdivu_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vsaddu_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsaddu_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfdiv_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vdivu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsadd_vv;
+        break;
+    case 1: /* Reserved */
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vdiv_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vsadd_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsadd_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfrdiv_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vdiv_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vssubu_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vremu_vv;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vssubu_vx;
+        break;
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vremu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vssub_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vrem_vv;
+        break;
+    case 3:
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vssub_vx;
+        break;
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vrem_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmul_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmulhu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmul_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmulhu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsll_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmul_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vsll_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsll_vx;
+        break;
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmul_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmulhsu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmulhsu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_100111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsmul_vv;
+        break;
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmulh_vv;
+        break;
+    case 3:
+        /* FIXME: Implement the decoding for vmv<nr>r. */
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsmul_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfrsub_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmulh_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsrl_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmadd_vv;
+        break;
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vsrl_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsrl_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmadd_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vsra_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfnmadd_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmadd_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vsra_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vsra_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfnmadd_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmadd_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vssrl_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmsub_vv;
+        break;
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vssrl_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vssrl_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmsub_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vssra_vv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfnmsub_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnmsub_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vssra_vi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vssra_vx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfnmsub_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnmsub_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnsrl_wv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmacc_vv;
+        break;
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vnsrl_wi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnsrl_wx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmacc_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnsra_wv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfnmacc_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vmacc_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vnsra_wi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnsra_wx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfnmacc_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vmacc_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnclipu_wv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfmsac_vv;
+        break;
+    case 2:
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vnclipu_wi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnclipu_wx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfmsac_vf;
+        break;
+    case 6:
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_101111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnclip_wv;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfnmsac_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vnmsac_vv;
+        break;
+    case 3:
+        decode_vitype(ir, insn);
+        ir->opcode = rv_insn_vnclip_wi;
+        break;
+    case 4:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnclip_wx;
+        break;
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfnmsac_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vnmsac_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwredsumu_vs;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwadd_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwaddu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwadd_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwaddu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110001(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwredsum_vs;
+        break;
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwredusum_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwadd_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwadd_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwsub_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwsubu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwsub_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwsubu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwredosum_vs;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwsub_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwsub_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwadd_wv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwaddu_wv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwadd_wf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwaddu_wx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwadd_wv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwadd_wx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwsub_wv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwsubu_wv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwsub_wf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwsubu_wx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_110111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwsub_wv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwsub_wx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111000(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwmul_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmulu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwmul_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmulu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111010(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmulsu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmulsu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111011(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmul_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmul_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111100(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwmacc_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmaccu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwmacc_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmaccu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111101(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwnmacc_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmacc_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwnmacc_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmacc_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111110(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwmsac_vv;
+        break;
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwmsac_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmaccus_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+
+static inline bool op_111111(rv_insn_t *ir, const uint32_t insn)
+{
+    switch (decode_funct3(insn)) {
+    case 0:
+    case 1:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vfwnmsac_vv;
+        break;
+    case 2:
+        decode_vvtype(ir, insn);
+        ir->opcode = rv_insn_vwmaccsu_vv;
+        break;
+    case 3:
+    case 4:
+    case 5:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vfwnmsac_vf;
+        break;
+    case 6:
+        decode_vxtype(ir, insn);
+        ir->opcode = rv_insn_vwmaccsu_vx;
+        break;
+    default: /* illegal instruction */
+        return false;
+    }
+    return true;
+}
+#else /* !RV32_HAS(EXT_V) */
+#define op_vcfg OP_UNIMP
+#endif /* Vector extension */
+
+/* handler for all unimplemented opcodes */
+static inline bool op_unimp(rv_insn_t *ir UNUSED, uint32_t insn UNUSED)
+{
+    return false;
+}
+
+/* RV32 decode handler type */
+typedef bool (*decode_t)(rv_insn_t *ir, uint32_t insn);
+
+/* decode RISC-V instruction */
+bool rv_decode(rv_insn_t *ir, uint32_t insn)
+{
+    assert(ir);
+
+#define OP_UNIMP op_unimp
+#define OP(insn) op_##insn
+
+    /* RV32 base opcode map */
+    /* clang-format off */
+    static const decode_t rv_jump_table[] = {
+    //  000         001           010        011           100         101        110        111
+        OP(load),   OP(load_fp),  OP(unimp), OP(misc_mem), OP(op_imm), OP(auipc), OP(unimp), OP(unimp), // 00
+        OP(store),  OP(store_fp), OP(unimp), OP(amo),      OP(op),     OP(lui),   OP(unimp), OP(unimp), // 01
+        OP(madd),   OP(msub),     OP(nmsub), OP(nmadd),    OP(op_fp),  OP(vcfg),  OP(unimp), OP(unimp), // 10
+        OP(branch), OP(jalr),     OP(unimp), OP(jal),      OP(system), OP(unimp), OP(unimp), OP(unimp), // 11
+    };
+
+#if RV32_HAS(EXT_C)
+    /* RV32C opcode map */
+    static const decode_t rvc_jump_table[] = {
+    //  00             01             10          11
+        OP(caddi4spn), OP(caddi),     OP(cslli),  OP(unimp),  // 000
+        OP(unimp),     OP(cjal),      OP(unimp),  OP(unimp),  // 001
+        OP(clw),       OP(cli),       OP(clwsp),  OP(unimp),  // 010
+        OP(cflw),      OP(clui),      OP(cflwsp), OP(unimp),  // 011
+        OP(unimp),     OP(cmisc_alu), OP(ccr),    OP(unimp),  // 100
+        OP(unimp),     OP(cj),        OP(unimp),  OP(unimp),  // 101
+        OP(csw),       OP(cbeqz),     OP(cswsp),  OP(unimp),  // 110
+        OP(cfsw),      OP(cbnez),     OP(cfswsp), OP(unimp),  // 111
+    };
+#endif
+
+#if RV32_HAS(EXT_V)
+    static const decode_t rvv_jump_table[] = {
+    /* This table maps the funct6 field of RISC-V Vector instructions to
+     * their decode handlers. For detailed specifications, see:
+     * https://github.com/riscvarchive/riscv-v-spec/blob/master/inst-table.adoc
+     */
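+    /* Within each handler, funct3 (insn[14:12]) selects the operand format:
+     * 0 = OPIVV, 1 = OPFVV, 2 = OPMVV, 3 = OPIVI, 4 = OPIVX, 5 = OPFVF,
+     * 6 = OPMVX.
+     */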
+    //  000         001         010         011         100         101         110         111
+        OP(000000), OP(000001), OP(000010), OP(000011), OP(000100), OP(000101), OP(000110), OP(000111), // 000
+        OP(001000), OP(001001), OP(001010), OP(001011), OP(001100), OP(unimp),  OP(001110), OP(001111), // 001
+        OP(010000), OP(010001), OP(010010), OP(010011), OP(010100), OP(unimp),  OP(unimp),  OP(010111), // 010
+        OP(011000), OP(011001), OP(011010), OP(011011), OP(011100), OP(011101), OP(011110), OP(011111), // 011
+        OP(100000), OP(100001), OP(100010), OP(100011), OP(100100), OP(100101), OP(100110), OP(100111), // 100
+        OP(101000), OP(101001), OP(101010), OP(101011), OP(101100), OP(101101), OP(101110), OP(101111), // 101
+        OP(110000), OP(110001), OP(110010), OP(110011), OP(110100), OP(110101), OP(110110), OP(110111), // 110
+        OP(111000), OP(unimp),  OP(111010), OP(111011), OP(111100), OP(111101), OP(111110), OP(111111)  // 111
+    };
+#endif
+    /* clang-format on */
+
+    /* Compressed Extension Instruction */
+#if RV32_HAS(EXT_C)
+    /* If the lowest two bits are 0b00, 0b01, or 0b10, this is a 16-bit
+     * compressed instruction.
+     */
+    if (is_compressed(insn)) {
+        insn &= 0x0000FFFF;
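+        /* c_index packs the compressed funct3 (top three bits of the
+         * halfword) into bits [4:2] and the 2-bit op into bits [1:0],
+         * selecting a row/column in the 8x4 rvc_jump_table above.
+         */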
+        const uint16_t c_index = (insn & FC_FUNC3) >> 11 | (insn & FC_OPCODE);
+
+        /* decode instruction (compressed instructions) */
+        const decode_t op = rvc_jump_table[c_index];
+        assert(op);
+        return op(ir, insn);
+    }
+#endif
+
+    /* standard uncompressed instruction */
+    const uint32_t index = (insn & INSN_6_2) >> 2;
+
+#if RV32_HAS(EXT_V)
+    /* Handle vector operations */
+    if (index == 0b10101) {
+        /* Configuration-setting (vcfg) and vector arithmetic instructions
+         * share the OP-V major opcode; funct3 == 0b111 selects vcfg.
+         */
+        if (decode_funct3(insn) == 0b111) {
+            const decode_t op = rv_jump_table[index];
+            return op(ir, insn);
+        }
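+        /* All other funct3 values are vector arithmetic instructions and are
+         * dispatched on funct6 = insn[31:26]. For example, vadd.vv
+         * (funct6 = 0b000000, funct3 = 0b000/OPIVV) lands in op_000000().
+         */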
+        const uint32_t v_index = (insn >> 26) & 0x3F;
+        const decode_t op = rvv_jump_table[v_index];
+        return op(ir, insn);
+    }
+#endif
 
     /* decode instruction */
     const decode_t op = rv_jump_table[index];
diff --git a/src/decode.h b/src/decode.h
index b566d86c..66f2ba37 100644
--- a/src/decode.h
+++ b/src/decode.h
@@ -231,7 +231,603 @@ enum op_field {
             _(cflw, 0, 2, 1, ENC(rs1, rd))             \
             _(cfsw, 0, 2, 1, ENC(rs1, rs2))            \
         )                                              \
+    )                                                  \
+    /* Vector Extension */                             \
+    IIF(RV32_HAS(EXT_V))(                              \
+        /* Configuration-setting Instructions */       \
+        _(vsetvli, 0, 4, 0, ENC(rs1, rd))              \
+        _(vsetivli, 0, 4, 0, ENC(rs1, rd))             \
+        _(vsetvl, 0, 4, 0, ENC(rs1, rd))               \
+        /* Vector Load instructions */                 \
+        _(vle8_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vle16_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vle32_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vle64_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vlseg2e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg3e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg4e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg5e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg6e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg7e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg8e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vlseg2e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg3e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg4e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg5e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg6e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg7e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg8e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg2e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg3e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg4e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg5e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg6e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg7e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg8e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg2e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg3e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg4e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg5e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg6e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg7e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlseg8e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vl1re8_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vl2re8_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vl4re8_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vl8re8_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vl1re16_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl2re16_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl4re16_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl8re16_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl1re32_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl2re32_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl4re32_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl8re32_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl1re64_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl2re64_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl4re64_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vl8re64_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vlm_v, 0, 4, 0, ENC(rs1, vd))                \
+        _(vle8ff_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vle16ff_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vle32ff_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vle64ff_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vlseg2e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg3e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg4e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg5e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg6e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg7e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg8e8ff_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlseg2e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg3e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg4e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg5e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg6e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg7e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg8e16ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg2e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg3e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg4e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg5e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg6e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg7e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg8e32ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg2e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg3e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg4e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg5e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg6e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg7e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vlseg8e64ff_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxei8_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vluxei16_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vluxei32_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vluxei64_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vluxseg2ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg3ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg4ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg5ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg6ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg7ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg8ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vluxseg2ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg3ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg4ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg5ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg6ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg7ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg8ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg2ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg3ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg4ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg5ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg6ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg7ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg8ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg2ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg3ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg4ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg5ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg6ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg7ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vluxseg8ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vlse8_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vlse16_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vlse32_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vlse64_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vlsseg2e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg3e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg4e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg5e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg6e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg7e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg8e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vlsseg2e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg3e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg4e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg5e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg6e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg7e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg8e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg2e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg3e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg4e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg5e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg6e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg7e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg8e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg2e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg3e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg4e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg5e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg6e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg7e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vlsseg8e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vloxei8_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vloxei16_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vloxei32_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vloxei64_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vloxseg2ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg3ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg4ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg5ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg6ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg7ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg8ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vloxseg2ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg3ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg4ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg5ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg6ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg7ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg8ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg2ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg3ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg4ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg5ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg6ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg7ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg8ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg2ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg3ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg4ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg5ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg6ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg7ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vloxseg8ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        /* Vector store instructions */                \
+        _(vse8_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vse16_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vse32_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vse64_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vsseg2e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg3e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg4e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg5e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg6e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg7e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg8e8_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsseg2e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg3e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg4e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg5e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg6e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg7e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg8e16_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg2e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg3e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg4e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg5e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg6e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg7e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg8e32_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg2e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg3e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg4e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg5e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg6e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg7e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vsseg8e64_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vs1r_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vs2r_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vs4r_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vs8r_v, 0, 4, 0, ENC(rs1, vd))               \
+        _(vsm_v, 0, 4, 0, ENC(rs1, vd))                \
+        _(vsuxei8_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vsuxei16_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsuxei32_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsuxei64_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsuxseg2ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg3ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg4ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg5ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg6ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg7ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg8ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsuxseg2ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg3ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg4ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg5ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg6ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg7ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg8ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg2ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg3ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg4ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg5ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg6ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg7ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg8ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg2ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg3ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg4ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg5ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg6ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg7ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsuxseg8ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsse8_v, 0, 4, 0, ENC(rs1, vd))              \
+        _(vsse16_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vsse32_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vsse64_v, 0, 4, 0, ENC(rs1, vd))             \
+        _(vssseg2e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg3e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg4e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg5e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg6e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg7e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg8e8_v, 0, 4, 0, ENC(rs1, vd))          \
+        _(vssseg2e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg3e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg4e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg5e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg6e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg7e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg8e16_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg2e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg3e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg4e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg5e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg6e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg7e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg8e32_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg2e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg3e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg4e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg5e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg6e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg7e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vssseg8e64_v, 0, 4, 0, ENC(rs1, vd))         \
+        _(vsoxei8_v, 0, 4, 0, ENC(rs1, vd))            \
+        _(vsoxei16_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsoxei32_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsoxei64_v, 0, 4, 0, ENC(rs1, vd))           \
+        _(vsoxseg2ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg3ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg4ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg5ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg6ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg7ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg8ei8_v, 0, 4, 0, ENC(rs1, vd))        \
+        _(vsoxseg2ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg3ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg4ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg5ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg6ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg7ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg8ei16_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg2ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg3ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg4ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg5ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg6ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg7ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg8ei32_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg2ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg3ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg4ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg5ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg6ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg7ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        _(vsoxseg8ei64_v, 0, 4, 0, ENC(rs1, vd))       \
+        /* Vector Arithmetic instructions */           \
+        /* OPI */                                      \
+        _(vadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vadd_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vadd_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsub_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vrsub_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vrsub_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vminu_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vminu_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmin_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmin_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmaxu_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmaxu_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmax_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmax_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vand_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vand_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vand_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vor_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vor_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vor_vi, 0, 4, 0, ENC(rs2, rd))               \
+        _(vxor_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vxor_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vxor_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vrgather_vv, 0, 4, 0, ENC(rs1, rs2, vd))     \
+        _(vrgather_vx, 0, 4, 0, ENC(rs1, rs2, vd))     \
+        _(vrgather_vi, 0, 4, 0, ENC(rs2, rd))          \
+        _(vslideup_vx, 0, 4, 0, ENC(rs1, rs2, vd))     \
+        _(vslideup_vi, 0, 4, 0, ENC(rs2, rd))          \
+        _(vrgatherei16_vv, 0, 4, 0, ENC(rs1, rs2, vd)) \
+        _(vslidedown_vx, 0, 4, 0, ENC(rs1, rs2, vd))   \
+        _(vslidedown_vi, 0, 4, 0, ENC(rs2, rd))        \
+        _(vadc_vvm, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vadc_vxm, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vadc_vim, 0, 4, 0, ENC(rs2, rd))             \
+        _(vmadc_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmadc_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmadc_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vsbc_vvm, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsbc_vxm, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsbc_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsbc_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmerge_vvm, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vmerge_vxm, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vmerge_vim, 0, 4, 0, ENC(rs2, rd))           \
+        _(vmv_v_v, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmv_v_x, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmv_v_i, 0, 4, 0, ENC(rs2, rd))              \
+        _(vmseq_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmseq_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmseq_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vmsne_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsne_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsne_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vmsltu_vv, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmsltu_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmslt_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmslt_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsleu_vv, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmsleu_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmsleu_vi, 0, 4, 0, ENC(rs2, rd))            \
+        _(vmsle_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsle_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsle_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vmsgtu_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmsgtu_vi, 0, 4, 0, ENC(rs2, rd))            \
+        _(vmsgt_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmsgt_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vsaddu_vv, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vsaddu_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vsaddu_vi, 0, 4, 0, ENC(rs2, rd))            \
+        _(vsadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsadd_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsadd_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vssubu_vv, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vssubu_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vssub_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vssub_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsll_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsll_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsll_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vsmul_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsmul_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vsrl_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsrl_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsrl_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vsra_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsra_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vsra_vi, 0, 4, 0, ENC(rs2, rd))              \
+        _(vssrl_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vssrl_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vssrl_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vssra_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vssra_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vssra_vi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vnsrl_wv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vnsrl_wx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vnsrl_wi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vnsra_wv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vnsra_wx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vnsra_wi, 0, 4, 0, ENC(rs2, rd))             \
+        _(vnclipu_wv, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vnclipu_wx, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vnclipu_wi, 0, 4, 0, ENC(rs2, rd))           \
+        _(vnclip_wv, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vnclip_wx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vnclip_wi, 0, 4, 0, ENC(rs2, rd))            \
+        _(vwredsumu_vs, 0, 4, 0, ENC(rs1, rs2, vd))    \
+        _(vwredsum_vs, 0, 4, 0, ENC(rs1, rs2, vd))     \
+        /* OPM */                                         \
+        _(vredsum_vs, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vredand_vs, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vredor_vs, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vredxor_vs, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vredminu_vs, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vredmin_vs, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vredmaxu_vs, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vredmax_vs, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vaaddu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vaaddu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vaadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vaadd_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vasubu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vasubu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vasub_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vasub_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vslide1up_vx, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vslide1down_vx, 0, 4, 0, ENC(rs1, rs2, vd))     \
+        _(vcompress_vm, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vmandn_mm, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vmand_mm, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmor_mm, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmxor_mm, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmorn_mm, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmnand_mm, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vmnor_mm, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmxnor_mm, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vdivu_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vdivu_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vdiv_vv, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vdiv_vx, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vremu_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vremu_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vrem_vv, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vrem_vx, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmulhu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vmulhu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vmul_vv, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmul_vx, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmulhsu_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmulhsu_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vmulh_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmulh_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmadd_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vnmsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vnmsub_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmacc_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vnmsac_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vnmsac_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwaddu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwaddu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwadd_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwsubu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwsubu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwsub_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwaddu_wv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwaddu_wx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwadd_wv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwadd_wx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwsubu_wv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwsubu_wx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwsub_wv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwsub_wx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwmulu_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwmulu_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwmulsu_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vwmulsu_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vwmul_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwmul_vx, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vwmaccu_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vwmaccu_vx, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vwmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwmacc_vx, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vwmaccus_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vwmaccsu_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vwmaccsu_vx, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vmv_s_x, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmv_x_s, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vcpop_m, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vfirst_m, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmsbf_m, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmsof_m, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vmsif_m, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(viota_m, 0, 4, 0, ENC(rs1, rs2, vd))            \
+        _(vid_v, 0, 4, 0, ENC(rs1, rs2, vd))              \
+        /* OPF */                                         \
+        _(vfadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfadd_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfredusum_vs, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vfsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfsub_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfredosum_vs, 0, 4, 0, ENC(rs1, rs2, vd))       \
+        _(vfmin_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfmin_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfredmin_vs, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfmax_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfmax_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfredmax_vs, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfsgnj_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfsgnj_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfsgnjn_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfsgnjn_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfsgnjx_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfsgnjx_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfslide1up_vf, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vfslide1down_vf, 0, 4, 0, ENC(rs1, rs2, vd))    \
+        _(vfmerge_vfm, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfmv_v_f, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfeq_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfeq_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfle_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfle_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmflt_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmflt_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfne_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfne_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfgt_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vmfge_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfdiv_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfdiv_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfrdiv_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmul_vv, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfmul_vf, 0, 4, 0, ENC(rs1, rs2, vd))           \
+        _(vfrsub_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmadd_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfnmadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfnmadd_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfmsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmsub_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfnmsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfnmsub_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmacc_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfnmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfnmacc_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfmsac_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfmsac_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfnmsac_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfnmsac_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfwadd_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwadd_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwredusum_vs, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vfwsub_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwsub_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwredosum_vs, 0, 4, 0, ENC(rs1, rs2, vd))      \
+        _(vfwadd_wv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwadd_wf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwsub_wv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwsub_wf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwmul_vv, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwmul_vf, 0, 4, 0, ENC(rs1, rs2, vd))          \
+        _(vfwmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfwmacc_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfwnmacc_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfwnmacc_vf, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfwmsac_vv, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfwmsac_vf, 0, 4, 0, ENC(rs1, rs2, vd))         \
+        _(vfwnmsac_vv, 0, 4, 0, ENC(rs1, rs2, vd))        \
+        _(vfwnmsac_vf, 0, 4, 0, ENC(rs1, rs2, vd))        \
     )
+
 /* clang-format on */
 
 /* Macro operation fusion */
@@ -315,6 +911,15 @@ enum {
     //               ....xxxx....xxxx....xxxx....xxxx
     FCJ_IMM      = 0b00000000000000000001111111111100,
     //               ....xxxx....xxxx....xxxx....xxxx
+    FV_ZIMM_30_20 = 0b01111111111100000000000000000000,
+    FV_ZIMM_29_20 = 0b00111111111100000000000000000000,
+    FV_VM        = 0b00000010000000000000000000000000,
+    FV_MOP       = 0b00001100000000000000000000000000,
+    FV_MEW       = 0b00010000000000000000000000000000,
+    FV_NF        = 0b11100000000000000000000000000000,
+    FV_14_12     = 0b00000000000000000111000000000000,
+    FV_24_20     = 0b00000001111100000000000000000000,
+    FV_FUNC6     = 0b11111100000000000000000000000000,
 };
 /* clang-format on */
 
@@ -345,12 +950,18 @@ typedef struct rv_insn {
     };
     uint8_t rd, rs1, rs2;
     /* store IR list */
-    uint8_t opcode;
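+    /* uint16_t rather than uint8_t: the IR opcode enumeration exceeds 255
+     * entries once the vector extension is enabled.
+     */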
+    uint16_t opcode;
 
 #if RV32_HAS(EXT_C)
     uint8_t shamt;
 #endif
 
+#if RV32_HAS(EXT_V)
+    int32_t zimm;
+
+    uint8_t vd, vs1, vs2, vs3, eew, vm;
+#endif
+
 #if RV32_HAS(EXT_F)
     /* Floating-point operations use either a static rounding mode encoded in
      * the instruction, or a dynamic rounding mode held in frm. A value of 111
diff --git a/src/feature.h b/src/feature.h
index 1a95ecd5..4afdb11c 100644
--- a/src/feature.h
+++ b/src/feature.h
@@ -118,5 +118,10 @@
 #define RV32_FEATURE_ARCH_TEST 0
 #endif
 
+/* Standard Extension for Vector Instructions */
+#ifndef RV32_FEATURE_EXT_V
+#define RV32_FEATURE_EXT_V 0
+#endif
+
 /* Feature test macro */
 #define RV32_HAS(x) RV32_FEATURE_##x
diff --git a/src/riscv.h b/src/riscv.h
index bc343e8a..502d4e1a 100644
--- a/src/riscv.h
+++ b/src/riscv.h
@@ -296,6 +296,9 @@ typedef uint32_t riscv_exception_t;
 #if RV32_HAS(EXT_F)
 typedef softfloat_float32_t riscv_float_t;
 #endif
+#if RV32_HAS(EXT_V)
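+/* A vector register is stored as VLEN/32 32-bit words, e.g. four words when
+ * VLEN = 128.
+ */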
+typedef uint32_t vreg_t[VLEN / 32];
+#endif
 
 /* memory read handlers */
 typedef riscv_word_t (*riscv_mem_ifetch)(riscv_t *rv, riscv_word_t addr);
diff --git a/src/riscv_private.h b/src/riscv_private.h
index 163aac1c..ea776085 100644
--- a/src/riscv_private.h
+++ b/src/riscv_private.h
@@ -76,6 +76,15 @@ enum {
     CSR_CYCLEH = 0xC80,
     CSR_TIMEH = 0xC81,
     CSR_INSTRETH = 0xC82,
+
+    /* vector extension */
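+    /* Unprivileged vector CSRs as assigned by the V extension:
+     * vstart 0x008, vxsat 0x009, vxrm 0x00A, vcsr 0x00F,
+     * vl 0xC20, vtype 0xC21, vlenb 0xC22.
+     */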
+    CSR_VSTART = 0x008,
+    CSR_VXSAT = 0x009,
+    CSR_VXRM = 0x00A,
+    CSR_VCSR = 0x00F,
+    CSR_VL = 0xC20,
+    CSR_VTYPE = 0xC21,
+    CSR_LENB = 0xC22,
 };
 
 /* translated basic block */
@@ -227,6 +236,18 @@ struct riscv_internal {
      */
     uint32_t last_csr_sepc;
 #endif
+
+#if RV32_HAS(EXT_V)
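+    /* Vector register file: one vreg_t (VLEN bits) for each of the 32
+     * vector registers v0-v31.
+     */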
+    vreg_t V[N_RV_REGS];
+
+    uint32_t csr_vstart; /* Vector start position */
+    uint32_t csr_vxsat;  /* Fixed-Point Saturate Flag */
+    uint32_t csr_vxrm;   /* Fixed-Point Rounding Mode */
+    uint32_t csr_vcsr;   /* Vector control and status register */
+    uint32_t csr_vl;     /* Vector length */
+    uint32_t csr_vtype;  /* Vector data type register */
+    uint32_t csr_vlenb;  /* VLEN/8 (vector register length in bytes) */
+#endif
 };
 
 /* sign extend a 16 bit value */
diff --git a/src/rv32_constopt.c b/src/rv32_constopt.c
index f317a3ff..d24774cd 100644
--- a/src/rv32_constopt.c
+++ b/src/rv32_constopt.c
@@ -1238,5 +1238,593 @@ CONSTOPT(bseti, {
     if (ir->rd)
         info->is_constant[ir->rd] = false;
 })
+#endif
 
+/* Vector Extension */
+#if RV32_HAS(EXT_V)
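+/* No constant-propagation opportunities are modelled for the vector
+ * instructions yet, so every handler below is intentionally an empty stub.
+ */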
+CONSTOPT(vsetvli, {})
+CONSTOPT(vsetivli, {})
+CONSTOPT(vsetvl, {})
+CONSTOPT(vle8_v, {})
+CONSTOPT(vle16_v, {})
+CONSTOPT(vle32_v, {})
+CONSTOPT(vle64_v, {})
+CONSTOPT(vlseg2e8_v, {})
+CONSTOPT(vlseg3e8_v, {})
+CONSTOPT(vlseg4e8_v, {})
+CONSTOPT(vlseg5e8_v, {})
+CONSTOPT(vlseg6e8_v, {})
+CONSTOPT(vlseg7e8_v, {})
+CONSTOPT(vlseg8e8_v, {})
+CONSTOPT(vlseg2e16_v, {})
+CONSTOPT(vlseg3e16_v, {})
+CONSTOPT(vlseg4e16_v, {})
+CONSTOPT(vlseg5e16_v, {})
+CONSTOPT(vlseg6e16_v, {})
+CONSTOPT(vlseg7e16_v, {})
+CONSTOPT(vlseg8e16_v, {})
+CONSTOPT(vlseg2e32_v, {})
+CONSTOPT(vlseg3e32_v, {})
+CONSTOPT(vlseg4e32_v, {})
+CONSTOPT(vlseg5e32_v, {})
+CONSTOPT(vlseg6e32_v, {})
+CONSTOPT(vlseg7e32_v, {})
+CONSTOPT(vlseg8e32_v, {})
+CONSTOPT(vlseg2e64_v, {})
+CONSTOPT(vlseg3e64_v, {})
+CONSTOPT(vlseg4e64_v, {})
+CONSTOPT(vlseg5e64_v, {})
+CONSTOPT(vlseg6e64_v, {})
+CONSTOPT(vlseg7e64_v, {})
+CONSTOPT(vlseg8e64_v, {})
+CONSTOPT(vl1re8_v, {})
+CONSTOPT(vl1re16_v, {})
+CONSTOPT(vl1re32_v, {})
+CONSTOPT(vl1re64_v, {})
+CONSTOPT(vl2re8_v, {})
+CONSTOPT(vl2re16_v, {})
+CONSTOPT(vl2re32_v, {})
+CONSTOPT(vl2re64_v, {})
+CONSTOPT(vl4re8_v, {})
+CONSTOPT(vl4re16_v, {})
+CONSTOPT(vl4re32_v, {})
+CONSTOPT(vl4re64_v, {})
+CONSTOPT(vl8re8_v, {})
+CONSTOPT(vl8re16_v, {})
+CONSTOPT(vl8re32_v, {})
+CONSTOPT(vl8re64_v, {})
+CONSTOPT(vlm_v, {})
+CONSTOPT(vle8ff_v, {})
+CONSTOPT(vle16ff_v, {})
+CONSTOPT(vle32ff_v, {})
+CONSTOPT(vle64ff_v, {})
+CONSTOPT(vlseg2e8ff_v, {})
+CONSTOPT(vlseg3e8ff_v, {})
+CONSTOPT(vlseg4e8ff_v, {})
+CONSTOPT(vlseg5e8ff_v, {})
+CONSTOPT(vlseg6e8ff_v, {})
+CONSTOPT(vlseg7e8ff_v, {})
+CONSTOPT(vlseg8e8ff_v, {})
+CONSTOPT(vlseg2e16ff_v, {})
+CONSTOPT(vlseg3e16ff_v, {})
+CONSTOPT(vlseg4e16ff_v, {})
+CONSTOPT(vlseg5e16ff_v, {})
+CONSTOPT(vlseg6e16ff_v, {})
+CONSTOPT(vlseg7e16ff_v, {})
+CONSTOPT(vlseg8e16ff_v, {})
+CONSTOPT(vlseg2e32ff_v, {})
+CONSTOPT(vlseg3e32ff_v, {})
+CONSTOPT(vlseg4e32ff_v, {})
+CONSTOPT(vlseg5e32ff_v, {})
+CONSTOPT(vlseg6e32ff_v, {})
+CONSTOPT(vlseg7e32ff_v, {})
+CONSTOPT(vlseg8e32ff_v, {})
+CONSTOPT(vlseg2e64ff_v, {})
+CONSTOPT(vlseg3e64ff_v, {})
+CONSTOPT(vlseg4e64ff_v, {})
+CONSTOPT(vlseg5e64ff_v, {})
+CONSTOPT(vlseg6e64ff_v, {})
+CONSTOPT(vlseg7e64ff_v, {})
+CONSTOPT(vlseg8e64ff_v, {})
+CONSTOPT(vluxei8_v, {})
+CONSTOPT(vluxei16_v, {})
+CONSTOPT(vluxei32_v, {})
+CONSTOPT(vluxei64_v, {})
+CONSTOPT(vluxseg2ei8_v, {})
+CONSTOPT(vluxseg3ei8_v, {})
+CONSTOPT(vluxseg4ei8_v, {})
+CONSTOPT(vluxseg5ei8_v, {})
+CONSTOPT(vluxseg6ei8_v, {})
+CONSTOPT(vluxseg7ei8_v, {})
+CONSTOPT(vluxseg8ei8_v, {})
+CONSTOPT(vluxseg2ei16_v, {})
+CONSTOPT(vluxseg3ei16_v, {})
+CONSTOPT(vluxseg4ei16_v, {})
+CONSTOPT(vluxseg5ei16_v, {})
+CONSTOPT(vluxseg6ei16_v, {})
+CONSTOPT(vluxseg7ei16_v, {})
+CONSTOPT(vluxseg8ei16_v, {})
+CONSTOPT(vluxseg2ei32_v, {})
+CONSTOPT(vluxseg3ei32_v, {})
+CONSTOPT(vluxseg4ei32_v, {})
+CONSTOPT(vluxseg5ei32_v, {})
+CONSTOPT(vluxseg6ei32_v, {})
+CONSTOPT(vluxseg7ei32_v, {})
+CONSTOPT(vluxseg8ei32_v, {})
+CONSTOPT(vluxseg2ei64_v, {})
+CONSTOPT(vluxseg3ei64_v, {})
+CONSTOPT(vluxseg4ei64_v, {})
+CONSTOPT(vluxseg5ei64_v, {})
+CONSTOPT(vluxseg6ei64_v, {})
+CONSTOPT(vluxseg7ei64_v, {})
+CONSTOPT(vluxseg8ei64_v, {})
+CONSTOPT(vlse8_v, {})
+CONSTOPT(vlse16_v, {})
+CONSTOPT(vlse32_v, {})
+CONSTOPT(vlse64_v, {})
+CONSTOPT(vlsseg2e8_v, {})
+CONSTOPT(vlsseg3e8_v, {})
+CONSTOPT(vlsseg4e8_v, {})
+CONSTOPT(vlsseg5e8_v, {})
+CONSTOPT(vlsseg6e8_v, {})
+CONSTOPT(vlsseg7e8_v, {})
+CONSTOPT(vlsseg8e8_v, {})
+CONSTOPT(vlsseg2e16_v, {})
+CONSTOPT(vlsseg3e16_v, {})
+CONSTOPT(vlsseg4e16_v, {})
+CONSTOPT(vlsseg5e16_v, {})
+CONSTOPT(vlsseg6e16_v, {})
+CONSTOPT(vlsseg7e16_v, {})
+CONSTOPT(vlsseg8e16_v, {})
+CONSTOPT(vlsseg2e32_v, {})
+CONSTOPT(vlsseg3e32_v, {})
+CONSTOPT(vlsseg4e32_v, {})
+CONSTOPT(vlsseg5e32_v, {})
+CONSTOPT(vlsseg6e32_v, {})
+CONSTOPT(vlsseg7e32_v, {})
+CONSTOPT(vlsseg8e32_v, {})
+CONSTOPT(vlsseg2e64_v, {})
+CONSTOPT(vlsseg3e64_v, {})
+CONSTOPT(vlsseg4e64_v, {})
+CONSTOPT(vlsseg5e64_v, {})
+CONSTOPT(vlsseg6e64_v, {})
+CONSTOPT(vlsseg7e64_v, {})
+CONSTOPT(vlsseg8e64_v, {})
+CONSTOPT(vloxei8_v, {})
+CONSTOPT(vloxei16_v, {})
+CONSTOPT(vloxei32_v, {})
+CONSTOPT(vloxei64_v, {})
+CONSTOPT(vloxseg2ei8_v, {})
+CONSTOPT(vloxseg3ei8_v, {})
+CONSTOPT(vloxseg4ei8_v, {})
+CONSTOPT(vloxseg5ei8_v, {})
+CONSTOPT(vloxseg6ei8_v, {})
+CONSTOPT(vloxseg7ei8_v, {})
+CONSTOPT(vloxseg8ei8_v, {})
+CONSTOPT(vloxseg2ei16_v, {})
+CONSTOPT(vloxseg3ei16_v, {})
+CONSTOPT(vloxseg4ei16_v, {})
+CONSTOPT(vloxseg5ei16_v, {})
+CONSTOPT(vloxseg6ei16_v, {})
+CONSTOPT(vloxseg7ei16_v, {})
+CONSTOPT(vloxseg8ei16_v, {})
+CONSTOPT(vloxseg2ei32_v, {})
+CONSTOPT(vloxseg3ei32_v, {})
+CONSTOPT(vloxseg4ei32_v, {})
+CONSTOPT(vloxseg5ei32_v, {})
+CONSTOPT(vloxseg6ei32_v, {})
+CONSTOPT(vloxseg7ei32_v, {})
+CONSTOPT(vloxseg8ei32_v, {})
+CONSTOPT(vloxseg2ei64_v, {})
+CONSTOPT(vloxseg3ei64_v, {})
+CONSTOPT(vloxseg4ei64_v, {})
+CONSTOPT(vloxseg5ei64_v, {})
+CONSTOPT(vloxseg6ei64_v, {})
+CONSTOPT(vloxseg7ei64_v, {})
+CONSTOPT(vloxseg8ei64_v, {})
+CONSTOPT(vse8_v, {})
+CONSTOPT(vse16_v, {})
+CONSTOPT(vse32_v, {})
+CONSTOPT(vse64_v, {})
+CONSTOPT(vsseg2e8_v, {})
+CONSTOPT(vsseg3e8_v, {})
+CONSTOPT(vsseg4e8_v, {})
+CONSTOPT(vsseg5e8_v, {})
+CONSTOPT(vsseg6e8_v, {})
+CONSTOPT(vsseg7e8_v, {})
+CONSTOPT(vsseg8e8_v, {})
+CONSTOPT(vsseg2e16_v, {})
+CONSTOPT(vsseg3e16_v, {})
+CONSTOPT(vsseg4e16_v, {})
+CONSTOPT(vsseg5e16_v, {})
+CONSTOPT(vsseg6e16_v, {})
+CONSTOPT(vsseg7e16_v, {})
+CONSTOPT(vsseg8e16_v, {})
+CONSTOPT(vsseg2e32_v, {})
+CONSTOPT(vsseg3e32_v, {})
+CONSTOPT(vsseg4e32_v, {})
+CONSTOPT(vsseg5e32_v, {})
+CONSTOPT(vsseg6e32_v, {})
+CONSTOPT(vsseg7e32_v, {})
+CONSTOPT(vsseg8e32_v, {})
+CONSTOPT(vsseg2e64_v, {})
+CONSTOPT(vsseg3e64_v, {})
+CONSTOPT(vsseg4e64_v, {})
+CONSTOPT(vsseg5e64_v, {})
+CONSTOPT(vsseg6e64_v, {})
+CONSTOPT(vsseg7e64_v, {})
+CONSTOPT(vsseg8e64_v, {})
+CONSTOPT(vs1r_v, {})
+CONSTOPT(vs2r_v, {})
+CONSTOPT(vs4r_v, {})
+CONSTOPT(vs8r_v, {})
+CONSTOPT(vsm_v, {})
+CONSTOPT(vsuxei8_v, {})
+CONSTOPT(vsuxei16_v, {})
+CONSTOPT(vsuxei32_v, {})
+CONSTOPT(vsuxei64_v, {})
+CONSTOPT(vsuxseg2ei8_v, {})
+CONSTOPT(vsuxseg3ei8_v, {})
+CONSTOPT(vsuxseg4ei8_v, {})
+CONSTOPT(vsuxseg5ei8_v, {})
+CONSTOPT(vsuxseg6ei8_v, {})
+CONSTOPT(vsuxseg7ei8_v, {})
+CONSTOPT(vsuxseg8ei8_v, {})
+CONSTOPT(vsuxseg2ei16_v, {})
+CONSTOPT(vsuxseg3ei16_v, {})
+CONSTOPT(vsuxseg4ei16_v, {})
+CONSTOPT(vsuxseg5ei16_v, {})
+CONSTOPT(vsuxseg6ei16_v, {})
+CONSTOPT(vsuxseg7ei16_v, {})
+CONSTOPT(vsuxseg8ei16_v, {})
+CONSTOPT(vsuxseg2ei32_v, {})
+CONSTOPT(vsuxseg3ei32_v, {})
+CONSTOPT(vsuxseg4ei32_v, {})
+CONSTOPT(vsuxseg5ei32_v, {})
+CONSTOPT(vsuxseg6ei32_v, {})
+CONSTOPT(vsuxseg7ei32_v, {})
+CONSTOPT(vsuxseg8ei32_v, {})
+CONSTOPT(vsuxseg2ei64_v, {})
+CONSTOPT(vsuxseg3ei64_v, {})
+CONSTOPT(vsuxseg4ei64_v, {})
+CONSTOPT(vsuxseg5ei64_v, {})
+CONSTOPT(vsuxseg6ei64_v, {})
+CONSTOPT(vsuxseg7ei64_v, {})
+CONSTOPT(vsuxseg8ei64_v, {})
+CONSTOPT(vsse8_v, {})
+CONSTOPT(vsse16_v, {})
+CONSTOPT(vsse32_v, {})
+CONSTOPT(vsse64_v, {})
+CONSTOPT(vssseg2e8_v, {})
+CONSTOPT(vssseg3e8_v, {})
+CONSTOPT(vssseg4e8_v, {})
+CONSTOPT(vssseg5e8_v, {})
+CONSTOPT(vssseg6e8_v, {})
+CONSTOPT(vssseg7e8_v, {})
+CONSTOPT(vssseg8e8_v, {})
+CONSTOPT(vssseg2e16_v, {})
+CONSTOPT(vssseg3e16_v, {})
+CONSTOPT(vssseg4e16_v, {})
+CONSTOPT(vssseg5e16_v, {})
+CONSTOPT(vssseg6e16_v, {})
+CONSTOPT(vssseg7e16_v, {})
+CONSTOPT(vssseg8e16_v, {})
+CONSTOPT(vssseg2e32_v, {})
+CONSTOPT(vssseg3e32_v, {})
+CONSTOPT(vssseg4e32_v, {})
+CONSTOPT(vssseg5e32_v, {})
+CONSTOPT(vssseg6e32_v, {})
+CONSTOPT(vssseg7e32_v, {})
+CONSTOPT(vssseg8e32_v, {})
+CONSTOPT(vssseg2e64_v, {})
+CONSTOPT(vssseg3e64_v, {})
+CONSTOPT(vssseg4e64_v, {})
+CONSTOPT(vssseg5e64_v, {})
+CONSTOPT(vssseg6e64_v, {})
+CONSTOPT(vssseg7e64_v, {})
+CONSTOPT(vssseg8e64_v, {})
+CONSTOPT(vsoxei8_v, {})
+CONSTOPT(vsoxei16_v, {})
+CONSTOPT(vsoxei32_v, {})
+CONSTOPT(vsoxei64_v, {})
+CONSTOPT(vsoxseg2ei8_v, {})
+CONSTOPT(vsoxseg3ei8_v, {})
+CONSTOPT(vsoxseg4ei8_v, {})
+CONSTOPT(vsoxseg5ei8_v, {})
+CONSTOPT(vsoxseg6ei8_v, {})
+CONSTOPT(vsoxseg7ei8_v, {})
+CONSTOPT(vsoxseg8ei8_v, {})
+CONSTOPT(vsoxseg2ei16_v, {})
+CONSTOPT(vsoxseg3ei16_v, {})
+CONSTOPT(vsoxseg4ei16_v, {})
+CONSTOPT(vsoxseg5ei16_v, {})
+CONSTOPT(vsoxseg6ei16_v, {})
+CONSTOPT(vsoxseg7ei16_v, {})
+CONSTOPT(vsoxseg8ei16_v, {})
+CONSTOPT(vsoxseg2ei32_v, {})
+CONSTOPT(vsoxseg3ei32_v, {})
+CONSTOPT(vsoxseg4ei32_v, {})
+CONSTOPT(vsoxseg5ei32_v, {})
+CONSTOPT(vsoxseg6ei32_v, {})
+CONSTOPT(vsoxseg7ei32_v, {})
+CONSTOPT(vsoxseg8ei32_v, {})
+CONSTOPT(vsoxseg2ei64_v, {})
+CONSTOPT(vsoxseg3ei64_v, {})
+CONSTOPT(vsoxseg4ei64_v, {})
+CONSTOPT(vsoxseg5ei64_v, {})
+CONSTOPT(vsoxseg6ei64_v, {})
+CONSTOPT(vsoxseg7ei64_v, {})
+CONSTOPT(vsoxseg8ei64_v, {})
+CONSTOPT(vadd_vv, {})
+CONSTOPT(vadd_vx, {})
+CONSTOPT(vadd_vi, {})
+CONSTOPT(vsub_vv, {})
+CONSTOPT(vsub_vx, {})
+CONSTOPT(vrsub_vx, {})
+CONSTOPT(vrsub_vi, {})
+CONSTOPT(vminu_vv, {})
+CONSTOPT(vminu_vx, {})
+CONSTOPT(vmin_vv, {})
+CONSTOPT(vmin_vx, {})
+CONSTOPT(vmaxu_vv, {})
+CONSTOPT(vmaxu_vx, {})
+CONSTOPT(vmax_vv, {})
+CONSTOPT(vmax_vx, {})
+CONSTOPT(vand_vv, {})
+CONSTOPT(vand_vx, {})
+CONSTOPT(vand_vi, {})
+CONSTOPT(vor_vv, {})
+CONSTOPT(vor_vx, {})
+CONSTOPT(vor_vi, {})
+CONSTOPT(vxor_vv, {})
+CONSTOPT(vxor_vx, {})
+CONSTOPT(vxor_vi, {})
+CONSTOPT(vrgather_vv, {})
+CONSTOPT(vrgather_vx, {})
+CONSTOPT(vrgather_vi, {})
+CONSTOPT(vslideup_vx, {})
+CONSTOPT(vslideup_vi, {})
+CONSTOPT(vrgatherei16_vv, {})
+CONSTOPT(vslidedown_vx, {})
+CONSTOPT(vslidedown_vi, {})
+CONSTOPT(vadc_vvm, {})
+CONSTOPT(vadc_vxm, {})
+CONSTOPT(vadc_vim, {})
+CONSTOPT(vmadc_vv, {})
+CONSTOPT(vmadc_vx, {})
+CONSTOPT(vmadc_vi, {})
+CONSTOPT(vsbc_vvm, {})
+CONSTOPT(vsbc_vxm, {})
+CONSTOPT(vmsbc_vv, {})
+CONSTOPT(vmsbc_vx, {})
+CONSTOPT(vmerge_vvm, {})
+CONSTOPT(vmerge_vxm, {})
+CONSTOPT(vmerge_vim, {})
+CONSTOPT(vmv_v_v, {})
+CONSTOPT(vmv_v_x, {})
+CONSTOPT(vmv_v_i, {})
+CONSTOPT(vmseq_vv, {})
+CONSTOPT(vmseq_vx, {})
+CONSTOPT(vmseq_vi, {})
+CONSTOPT(vmsne_vv, {})
+CONSTOPT(vmsne_vx, {})
+CONSTOPT(vmsne_vi, {})
+CONSTOPT(vmsltu_vv, {})
+CONSTOPT(vmsltu_vx, {})
+CONSTOPT(vmslt_vv, {})
+CONSTOPT(vmslt_vx, {})
+CONSTOPT(vmsleu_vv, {})
+CONSTOPT(vmsleu_vx, {})
+CONSTOPT(vmsleu_vi, {})
+CONSTOPT(vmsle_vv, {})
+CONSTOPT(vmsle_vx, {})
+CONSTOPT(vmsle_vi, {})
+CONSTOPT(vmsgtu_vx, {})
+CONSTOPT(vmsgtu_vi, {})
+CONSTOPT(vmsgt_vx, {})
+CONSTOPT(vmsgt_vi, {})
+CONSTOPT(vsaddu_vv, {})
+CONSTOPT(vsaddu_vx, {})
+CONSTOPT(vsaddu_vi, {})
+CONSTOPT(vsadd_vv, {})
+CONSTOPT(vsadd_vx, {})
+CONSTOPT(vsadd_vi, {})
+CONSTOPT(vssubu_vv, {})
+CONSTOPT(vssubu_vx, {})
+CONSTOPT(vssub_vv, {})
+CONSTOPT(vssub_vx, {})
+CONSTOPT(vsll_vv, {})
+CONSTOPT(vsll_vx, {})
+CONSTOPT(vsll_vi, {})
+CONSTOPT(vsmul_vv, {})
+CONSTOPT(vsmul_vx, {})
+CONSTOPT(vsrl_vv, {})
+CONSTOPT(vsrl_vx, {})
+CONSTOPT(vsrl_vi, {})
+CONSTOPT(vsra_vv, {})
+CONSTOPT(vsra_vx, {})
+CONSTOPT(vsra_vi, {})
+CONSTOPT(vssrl_vv, {})
+CONSTOPT(vssrl_vx, {})
+CONSTOPT(vssrl_vi, {})
+CONSTOPT(vssra_vv, {})
+CONSTOPT(vssra_vx, {})
+CONSTOPT(vssra_vi, {})
+CONSTOPT(vnsrl_wv, {})
+CONSTOPT(vnsrl_wx, {})
+CONSTOPT(vnsrl_wi, {})
+CONSTOPT(vnsra_wv, {})
+CONSTOPT(vnsra_wx, {})
+CONSTOPT(vnsra_wi, {})
+CONSTOPT(vnclipu_wv, {})
+CONSTOPT(vnclipu_wx, {})
+CONSTOPT(vnclipu_wi, {})
+CONSTOPT(vnclip_wv, {})
+CONSTOPT(vnclip_wx, {})
+CONSTOPT(vnclip_wi, {})
+CONSTOPT(vwredsumu_vs, {})
+CONSTOPT(vwredsum_vs, {})
+CONSTOPT(vredsum_vs, {})
+CONSTOPT(vredand_vs, {})
+CONSTOPT(vredor_vs, {})
+CONSTOPT(vredxor_vs, {})
+CONSTOPT(vredminu_vs, {})
+CONSTOPT(vredmin_vs, {})
+CONSTOPT(vredmaxu_vs, {})
+CONSTOPT(vredmax_vs, {})
+CONSTOPT(vaaddu_vv, {})
+CONSTOPT(vaaddu_vx, {})
+CONSTOPT(vaadd_vv, {})
+CONSTOPT(vaadd_vx, {})
+CONSTOPT(vasubu_vv, {})
+CONSTOPT(vasubu_vx, {})
+CONSTOPT(vasub_vv, {})
+CONSTOPT(vasub_vx, {})
+CONSTOPT(vslide1up_vx, {})
+CONSTOPT(vslide1down_vx, {})
+CONSTOPT(vcompress_vm, {})
+CONSTOPT(vmandn_mm, {})
+CONSTOPT(vmand_mm, {})
+CONSTOPT(vmor_mm, {})
+CONSTOPT(vmxor_mm, {})
+CONSTOPT(vmorn_mm, {})
+CONSTOPT(vmnand_mm, {})
+CONSTOPT(vmnor_mm, {})
+CONSTOPT(vmxnor_mm, {})
+CONSTOPT(vdivu_vv, {})
+CONSTOPT(vdivu_vx, {})
+CONSTOPT(vdiv_vv, {})
+CONSTOPT(vdiv_vx, {})
+CONSTOPT(vremu_vv, {})
+CONSTOPT(vremu_vx, {})
+CONSTOPT(vrem_vv, {})
+CONSTOPT(vrem_vx, {})
+CONSTOPT(vmulhu_vv, {})
+CONSTOPT(vmulhu_vx, {})
+CONSTOPT(vmul_vv, {})
+CONSTOPT(vmul_vx, {})
+CONSTOPT(vmulhsu_vv, {})
+CONSTOPT(vmulhsu_vx, {})
+CONSTOPT(vmulh_vv, {})
+CONSTOPT(vmulh_vx, {})
+CONSTOPT(vmadd_vv, {})
+CONSTOPT(vmadd_vx, {})
+CONSTOPT(vnmsub_vv, {})
+CONSTOPT(vnmsub_vx, {})
+CONSTOPT(vmacc_vv, {})
+CONSTOPT(vmacc_vx, {})
+CONSTOPT(vnmsac_vv, {})
+CONSTOPT(vnmsac_vx, {})
+CONSTOPT(vwaddu_vv, {})
+CONSTOPT(vwaddu_vx, {})
+CONSTOPT(vwadd_vv, {})
+CONSTOPT(vwadd_vx, {})
+CONSTOPT(vwsubu_vv, {})
+CONSTOPT(vwsubu_vx, {})
+CONSTOPT(vwsub_vv, {})
+CONSTOPT(vwsub_vx, {})
+CONSTOPT(vwaddu_wv, {})
+CONSTOPT(vwaddu_wx, {})
+CONSTOPT(vwadd_wv, {})
+CONSTOPT(vwadd_wx, {})
+CONSTOPT(vwsubu_wv, {})
+CONSTOPT(vwsubu_wx, {})
+CONSTOPT(vwsub_wv, {})
+CONSTOPT(vwsub_wx, {})
+CONSTOPT(vwmulu_vv, {})
+CONSTOPT(vwmulu_vx, {})
+CONSTOPT(vwmulsu_vv, {})
+CONSTOPT(vwmulsu_vx, {})
+CONSTOPT(vwmul_vv, {})
+CONSTOPT(vwmul_vx, {})
+CONSTOPT(vwmaccu_vv, {})
+CONSTOPT(vwmaccu_vx, {})
+CONSTOPT(vwmacc_vv, {})
+CONSTOPT(vwmacc_vx, {})
+CONSTOPT(vwmaccus_vx, {})
+CONSTOPT(vwmaccsu_vv, {})
+CONSTOPT(vwmaccsu_vx, {})
+CONSTOPT(vmv_s_x, {})
+CONSTOPT(vmv_x_s, {})
+CONSTOPT(vcpop_m, {})
+CONSTOPT(vfirst_m, {})
+CONSTOPT(vmsbf_m, {})
+CONSTOPT(vmsof_m, {})
+CONSTOPT(vmsif_m, {})
+CONSTOPT(viota_m, {})
+CONSTOPT(vid_v, {})
+CONSTOPT(vfadd_vv, {})
+CONSTOPT(vfadd_vf, {})
+CONSTOPT(vfredusum_vs, {})
+CONSTOPT(vfsub_vv, {})
+CONSTOPT(vfsub_vf, {})
+CONSTOPT(vfredosum_vs, {})
+CONSTOPT(vfmin_vv, {})
+CONSTOPT(vfmin_vf, {})
+CONSTOPT(vfredmin_vs, {})
+CONSTOPT(vfmax_vv, {})
+CONSTOPT(vfmax_vf, {})
+CONSTOPT(vfredmax_vs, {})
+CONSTOPT(vfsgnj_vv, {})
+CONSTOPT(vfsgnj_vf, {})
+CONSTOPT(vfsgnjn_vv, {})
+CONSTOPT(vfsgnjn_vf, {})
+CONSTOPT(vfsgnjx_vv, {})
+CONSTOPT(vfsgnjx_vf, {})
+CONSTOPT(vfslide1up_vf, {})
+CONSTOPT(vfslide1down_vf, {})
+CONSTOPT(vfmerge_vfm, {})
+CONSTOPT(vfmv_v_f, {})
+CONSTOPT(vmfeq_vv, {})
+CONSTOPT(vmfeq_vf, {})
+CONSTOPT(vmfle_vv, {})
+CONSTOPT(vmfle_vf, {})
+CONSTOPT(vmflt_vv, {})
+CONSTOPT(vmflt_vf, {})
+CONSTOPT(vmfne_vv, {})
+CONSTOPT(vmfne_vf, {})
+CONSTOPT(vmfgt_vf, {})
+CONSTOPT(vmfge_vf, {})
+CONSTOPT(vfdiv_vv, {})
+CONSTOPT(vfdiv_vf, {})
+CONSTOPT(vfrdiv_vf, {})
+CONSTOPT(vfmul_vv, {})
+CONSTOPT(vfmul_vf, {})
+CONSTOPT(vfrsub_vf, {})
+CONSTOPT(vfmadd_vv, {})
+CONSTOPT(vfmadd_vf, {})
+CONSTOPT(vfnmadd_vv, {})
+CONSTOPT(vfnmadd_vf, {})
+CONSTOPT(vfmsub_vv, {})
+CONSTOPT(vfmsub_vf, {})
+CONSTOPT(vfnmsub_vv, {})
+CONSTOPT(vfnmsub_vf, {})
+CONSTOPT(vfmacc_vv, {})
+CONSTOPT(vfmacc_vf, {})
+CONSTOPT(vfnmacc_vv, {})
+CONSTOPT(vfnmacc_vf, {})
+CONSTOPT(vfmsac_vv, {})
+CONSTOPT(vfmsac_vf, {})
+CONSTOPT(vfnmsac_vv, {})
+CONSTOPT(vfnmsac_vf, {})
+CONSTOPT(vfwadd_vv, {})
+CONSTOPT(vfwadd_vf, {})
+CONSTOPT(vfwredusum_vs, {})
+CONSTOPT(vfwsub_vv, {})
+CONSTOPT(vfwsub_vf, {})
+CONSTOPT(vfwredosum_vs, {})
+CONSTOPT(vfwadd_wv, {})
+CONSTOPT(vfwadd_wf, {})
+CONSTOPT(vfwsub_wv, {})
+CONSTOPT(vfwsub_wf, {})
+CONSTOPT(vfwmul_vv, {})
+CONSTOPT(vfwmul_vf, {})
+CONSTOPT(vfwmacc_vv, {})
+CONSTOPT(vfwmacc_vf, {})
+CONSTOPT(vfwnmacc_vv, {})
+CONSTOPT(vfwnmacc_vf, {})
+CONSTOPT(vfwmsac_vv, {})
+CONSTOPT(vfwmsac_vf, {})
+CONSTOPT(vfwnmsac_vv, {})
+CONSTOPT(vfwnmsac_vf, {})
 #endif
diff --git a/src/rv32_template.c b/src/rv32_template.c
index a2354daf..77215036 100644
--- a/src/rv32_template.c
+++ b/src/rv32_template.c
@@ -3031,3 +3031,4644 @@ RVOP(
     }))
 
 #endif
+
+#if RV32_HAS(EXT_V)
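+/* Placeholder for vector instructions that are not implemented yet */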
+#define V_NOP                        \
+    for (int i = 0; i < 4; i++) {    \
+        (rv)->V[rv_reg_zero][i] = 0; \
+    }
+
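+/* Number of 32-bit words per vector register (e.g., 4 when VLEN is 128) */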
+#define VREG_U32_COUNT ((VLEN) >> (5))
+/*
+ * Vector Configuration-Setting Instructions
+ *
+ * These instructions set the vector CSRs, specifically csr_vl and csr_vtype.
+ * The CSRs can only be updated using vset{i}vl{i} instructions. The current
+ * implementation does not support vma and vta.
+ *
+ * The value VLMAX = (LMUL * VLEN) / SEW represents the maximum number of
+ * elements that can be processed by a single vector instruction given the
+ * current SEW and LMUL.
+ *
+ * Constraints on Setting vl:
+ *  - vl = AVL if AVL ≤ VLMAX
+ *  - ceil(AVL / 2) ≤ vl ≤ VLMAX if AVL < 2 * VLMAX
+ *  - vl = VLMAX if AVL ≥ 2 * VLMAX
+ *
+ * +------------+------+--------------+
+ * | vlmul[2:0] | LMUL |    VLMAX     |
+ * +------------+------+--------------+
+ * |    1 0 0   |  -   |       -      |
+ * |    1 0 1   | 1/8  |  VLEN/SEW/8  |
+ * |    1 1 0   | 1/4  |  VLEN/SEW/4  |
+ * |    1 1 1   | 1/2  |  VLEN/SEW/2  |
+ * |    0 0 0   |  1   |  VLEN/SEW    |
+ * |    0 0 1   |  2   |  2*VLEN/SEW  |
+ * |    0 1 0   |  4   |  4*VLEN/SEW  |
+ * |    0 1 1   |  8   |  8*VLEN/SEW  |
+ * +------------+------+--------------+
+ *
+ * LMUL determines how vector registers are grouped. Since VL controls the
+ * number of processed elements (based on SEW) and is derived from VLMAX,
+ * LMUL's primary role is setting VLMAX. This implementation computes VLMAX
+ * directly, so fractional LMUL values (1/2, 1/4, 1/8) never have to be
+ * represented explicitly.
+ *
+ * Mapping of rd, rs1, and AVL value effects on vl:
+ * +-----+-----+------------------+----------------------------------+
+ * | rd  | rs1 |    AVL value     |         Effect on vl             |
+ * +-----+-----+------------------+----------------------------------+
+ * |  -  | !x0 | Value in x[rs1]  | Normal stripmining               |
+ * | !x0 |  x0 | ~0               | Set vl to VLMAX                  |
+ * |  x0 |  x0 | Value in vl reg  | Keep existing vl                 |
+ * +-----+-----+------------------+----------------------------------+
+ *
+ * +------------+----------+
+ * | vsew[2:0]  |   SEW    |
+ * +------------+----------+
+ * |    0 0 0   |     8    |
+ * |    0 0 1   |    16    |
+ * |    0 1 0   |    32    |
+ * |    0 1 1   |    64    |
+ * |    1 X X   | Reserved |
+ * +------------+----------+
+ */
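+
+/* Worked example, assuming the default VLEN of 128:
+ * vsetvli with vsew = 0b010 (SEW = 32) and vlmul = 0b001 (LMUL = 2) yields
+ * VLMAX = (2 * 128) / 32 = 8. With AVL = 5 in x[rs1], vl becomes 5; with
+ * AVL = 10 (VLMAX <= AVL < 2 * VLMAX) or AVL = 100 (>= 2 * VLMAX), this
+ * implementation sets vl to VLMAX = 8.
+ */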
+
+#define vl_setting(vlmax_, rs1, vl) \
+    if ((rs1) <= (vlmax_)) {        \
+        (vl) = (rs1);               \
+    } else {                        \
+        (vl) = (vlmax_);            \
+    }
+
+RVOP(
+    vsetvli,
+    {
+        uint8_t v_lmul = ir->zimm & 0b111;
+        uint8_t v_sew = (ir->zimm >> 3) & 0b111;
+
+        if (v_lmul == 4 || v_sew >= 4) {
+            /* Reserved vlmul (0b100) or reserved vsew encoding: set the vill
+             * bit (vtype[31]) and clear vl.
+             */
+            rv->csr_vl = 0;
+            rv->csr_vtype = 0x80000000;
+            return true;
+        }
+        uint16_t vlmax = (v_lmul < 4)
+                             ? ((1 << v_lmul) * VLEN) >> (3 + v_sew)
+                             : (VLEN >> (3 + v_sew) >> (3 - (v_lmul - 5)));
+        if (ir->rs1) {
+            vl_setting(vlmax, rv->X[ir->rs1], rv->csr_vl);
+            rv->csr_vtype = ir->zimm;
+        } else {
+            if (!ir->rd) {
+                rv->csr_vtype = ir->zimm;
+            } else {
+                rv->csr_vl = vlmax;
+                rv->csr_vtype = ir->zimm;
+            }
+        }
+        rv->X[ir->rd] = rv->csr_vl;
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsetivli,
+    {
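+        /* For vsetivli, the rs1 field carries the 5-bit unsigned immediate
+         * AVL rather than a register index.
+         */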
+        uint8_t v_lmul = ir->zimm & 0b111;
+        uint8_t v_sew = (ir->zimm >> 3) & 0b111;
+
+        if (v_lmul == 4 || v_sew >= 4) {
+            /* Illegal setting */
+            rv->csr_vl = 0;
+            rv->csr_vtype = 0x80000000;
+            return true;
+        }
+        uint16_t vlmax = (v_lmul < 4)
+                             ? ((1 << v_lmul) * VLEN) >> (3 + v_sew)
+                             : (VLEN >> (3 + v_sew) >> (3 - (v_lmul - 5)));
+        if (ir->rs1) {
+            vl_setting(vlmax, ir->rs1, rv->csr_vl);
+            rv->csr_vtype = ir->zimm;
+        } else {
+            if (!ir->rd) {
+                rv->csr_vtype = ir->zimm;
+            } else {
+                rv->csr_vl = vlmax;
+                rv->csr_vtype = ir->zimm;
+            }
+        }
+        rv->X[ir->rd] = rv->csr_vl;
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsetvl,
+    {
+        uint8_t v_lmul = rv->X[ir->rs2] & 0b111;
+        uint8_t v_sew = (rv->X[ir->rs2] >> 3) & 0b111;
+
+        if (v_lmul == 4 || v_sew >= 4) {
+            /* Illegal setting */
+            rv->csr_vl = 0;
+            rv->csr_vtype = 0x80000000;
+            return true;
+        }
+        uint16_t vlmax = (v_lmul < 4)
+                             ? ((1 << v_lmul) * VLEN) >> (3 + v_sew)
+                             : (VLEN >> (3 + v_sew) >> (3 - (v_lmul - 5)));
+        if (rv->X[ir->rs1]) {
+            vl_setting(vlmax, rv->X[ir->rs1], rv->csr_vl);
+            rv->csr_vtype = rv->X[ir->rs2];
+        } else {
+            if (!ir->rd) {
+                rv->csr_vtype = rv->X[ir->rs2];
+            } else {
+                rv->csr_vl = vlmax;
+                rv->csr_vtype = rv->X[ir->rs2];
+            }
+        }
+        rv->X[ir->rd] = rv->csr_vl;
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+#undef vl_setting
+
+/*
+ * In RVV, vector register v0 serves as the mask register. Each bit in v0
+ * indicates whether the corresponding element in other vector registers should
+ * be updated or left unmodified. When ir->vm == 1, masking is disabled. When
+ * ir->vm == 0, masking is enabled, and for each element, the bit in v0
+ * determines whether to use the newly computed result (bit = 1) or keep the
+ * original value in the destination register (bit = 0).
+ *
+ * The macro VECTOR_DISPATCH(des, op1, op2, op, op_type) selects the
+ * corresponding handler based on the SEW encoded in csr_vtype. It then calls
+ * one of the sew_*b_handler functions for 8-bit, 16-bit, or 32-bit operations.
+ * Each handler checks csr_vl to determine how many elements need to be
+ * processed and uses one of the three macros VV_LOOP, VX_LOOP, VI_LOOP,
+ * depending on whether the second operand is a vector, a scalar, or an
+ * immediate. These LOOP macros handle one 32-bit word at a time and pass the
+ * remainder to their respective V*_LOOP_LEFT macro when csr_vl is not evenly
+ * divisible.
+ *
+ * Inside each loop macro, SHIFT and MASK determine how to isolate and position
+ * sub-elements within each 32-bit word. SHIFT specifies how many bits to shift
+ * for each sub-element, and MASK filters out the bits that belong to a single
+ * sub-element, such as 0xFF for 8-bit or 0xFFFF for 16-bit. Index __i tracks
+ * which 32-bit word within the vector register is being processed; when it
+ * reaches the end of the current VLEN-bit vector register, __j increments to
+ * move on to the next vector register, and __i resets.
+ *
+ * The current approach supports only SEW values of 8, 16, and 32 bits.
+ */
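+
+/* Example with SEW = 8 (SHIFT = 3, MASK = 0xFF): element k of a 32-bit word w
+ * is isolated as (w >> (k << 3)) & 0xFF, operated on, and shifted back to bit
+ * position k * 8 before being accumulated into the destination word. With
+ * masking enabled (ir->vm == 0), bit k of the current v0 word selects between
+ * the new result and the previous destination sub-element.
+ */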
+
+#define VECTOR_DISPATCH(des, op1, op2, op, op_type)      \
+    {                                                    \
+        switch (8 << ((rv->csr_vtype >> 3) & 0b111)) {   \
+        case 8:                                          \
+            sew_8b_handler(des, op1, op2, op, op_type);  \
+            break;                                       \
+        case 16:                                         \
+            sew_16b_handler(des, op1, op2, op, op_type); \
+            break;                                       \
+        case 32:                                         \
+            sew_32b_handler(des, op1, op2, op, op_type); \
+            break;                                       \
+        default:                                         \
+            break;                                       \
+        }                                                \
+    }
+
+#define VI_LOOP(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)                 \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                        \
+    uint32_t tmp_d = rv->V[des + j][i];                                        \
+    uint32_t ans = 0;                                                          \
+    rv->V[des + j][i] = 0;                                                     \
+    for (uint8_t k = 0; k < itr; k++) {                                        \
+        if (ir->vm) {                                                          \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))), (op2))) & (MASK))       \
+                  << (k << (SHIFT));                                           \
+        } else {                                                               \
+            ans = (vm & (0x1 << k))                                            \
+                      ? ((op_##op((tmp_1 >> (k << (SHIFT))), (op2))) & (MASK)) \
+                            << (k << (SHIFT))                                  \
+                      : (tmp_d & (MASK << (k << (SHIFT))));                    \
+        }                                                                      \
+        rv->V[des + j][i] += ans;                                              \
+    }
+
+#define VI_LOOP_LEFT(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)            \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                        \
+    uint32_t tmp_d = rv->V[des + j][i];                                        \
+    if (rv->csr_vl % itr) {                                                    \
+        rv->V[des + j][i] &= (0xFFFFFFFF << ((rv->csr_vl % itr) << SHIFT));    \
+    }                                                                          \
+    uint32_t ans = 0;                                                          \
+    for (uint8_t k = 0; k < (rv->csr_vl % itr); k++) {                         \
+        assert((des + j) < 32);                                                \
+        if (ir->vm) {                                                          \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))), (op2))) & (MASK))       \
+                  << (k << (SHIFT));                                           \
+        } else {                                                               \
+            ans = (vm & (0x1 << k))                                            \
+                      ? ((op_##op((tmp_1 >> (k << (SHIFT))), (op2))) & (MASK)) \
+                            << (k << (SHIFT))                                  \
+                      : (tmp_d & (MASK << (k << (SHIFT))));                    \
+        }                                                                      \
+        rv->V[des + j][i] += ans;                                              \
+    }
+
+#define VV_LOOP(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)                \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                       \
+    uint32_t tmp_2 = rv->V[op2 + j][i];                                       \
+    uint32_t tmp_d = rv->V[des + j][i];                                       \
+    uint32_t ans = 0;                                                         \
+    rv->V[des + j][i] = 0;                                                    \
+    for (uint8_t k = 0; k < itr; k++) {                                       \
+        if (ir->vm) {                                                         \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))),                        \
+                            (tmp_2 >> (k << (SHIFT))))) &                     \
+                   (MASK))                                                    \
+                  << (k << (SHIFT));                                          \
+        } else {                                                              \
+            ans = (vm & (0x1 << k)) ? ((op_##op((tmp_1 >> (k << (SHIFT))),    \
+                                                (tmp_2 >> (k << (SHIFT))))) & \
+                                       (MASK))                                \
+                                          << (k << (SHIFT))                   \
+                                    : (tmp_d & (MASK << (k << (SHIFT))));     \
+        }                                                                     \
+        rv->V[des + j][i] += ans;                                             \
+    }
+
+#define VV_LOOP_LEFT(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)           \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                       \
+    uint32_t tmp_2 = rv->V[op2 + j][i];                                       \
+    uint32_t tmp_d = rv->V[des + j][i];                                       \
+    if (rv->csr_vl % itr) {                                                   \
+        rv->V[des + j][i] &= (0xFFFFFFFF << ((rv->csr_vl % itr) << SHIFT));   \
+    }                                                                         \
+    uint32_t ans = 0;                                                         \
+    for (uint8_t k = 0; k < (rv->csr_vl % itr); k++) {                        \
+        assert((des + j) < 32);                                               \
+        if (ir->vm) {                                                         \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))),                        \
+                            (tmp_2 >> (k << (SHIFT))))) &                     \
+                   (MASK))                                                    \
+                  << (k << (SHIFT));                                          \
+        } else {                                                              \
+            ans = (vm & (0x1 << k)) ? ((op_##op((tmp_1 >> (k << (SHIFT))),    \
+                                                (tmp_2 >> (k << (SHIFT))))) & \
+                                       (MASK))                                \
+                                          << (k << (SHIFT))                   \
+                                    : (tmp_d & (MASK << (k << (SHIFT))));     \
+        }                                                                     \
+        rv->V[des + j][i] += ans;                                             \
+    }
+
+#define VX_LOOP(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)                 \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                        \
+    uint32_t tmp_2 = rv->X[op2];                                               \
+    uint32_t tmp_d = rv->V[des + j][i];                                        \
+    uint32_t ans = 0;                                                          \
+    rv->V[des + j][i] = 0;                                                     \
+    for (uint8_t k = 0; k < itr; k++) {                                        \
+        if (ir->vm) {                                                          \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))), (tmp_2))) & (MASK))     \
+                  << (k << (SHIFT));                                           \
+        } else {                                                               \
+            ans =                                                              \
+                (vm & (0x1 << k))                                              \
+                    ? ((op_##op((tmp_1 >> (k << (SHIFT))), (tmp_2))) & (MASK)) \
+                          << (k << (SHIFT))                                    \
+                    : (tmp_d & (MASK << (k << (SHIFT))));                      \
+        }                                                                      \
+        rv->V[des + j][i] += ans;                                              \
+    }
+
+#define VX_LOOP_LEFT(des, op1, op2, op, SHIFT, MASK, i, j, itr, vm)            \
+    uint32_t tmp_1 = rv->V[op1 + j][i];                                        \
+    uint32_t tmp_2 = rv->X[op2];                                               \
+    uint32_t tmp_d = rv->V[des + j][i];                                        \
+    if (rv->csr_vl % itr) {                                                    \
+        rv->V[des + j][i] &= (0xFFFFFFFF << ((rv->csr_vl % itr) << SHIFT));    \
+    }                                                                          \
+    uint32_t ans = 0;                                                          \
+    for (uint8_t k = 0; k < (rv->csr_vl % itr); k++) {                         \
+        assert((des + j) < 32);                                                \
+        if (ir->vm) {                                                          \
+            ans = ((op_##op((tmp_1 >> (k << (SHIFT))), (tmp_2))) & (MASK))     \
+                  << (k << (SHIFT));                                           \
+        } else {                                                               \
+            ans =                                                              \
+                (vm & (0x1 << k))                                              \
+                    ? ((op_##op((tmp_1 >> (k << (SHIFT))), (tmp_2))) & (MASK)) \
+                          << (k << (SHIFT))                                    \
+                    : (tmp_d & (MASK << (k << (SHIFT))));                      \
+        }                                                                      \
+        rv->V[des + j][i] += ans;                                              \
+    }
+
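+/* Per-SEW drivers: with the default VLEN = 128 (VREG_U32_COUNT = 4), one
+ * vector register holds 16 8-bit, 8 16-bit, or 4 32-bit elements. __k counts
+ * processed elements, __i walks the 32-bit words of the current register, and
+ * __j advances to the next register once the current one is full. For 8-bit
+ * and 16-bit SEW, the v0 mask word is reloaded every 32 elements.
+ */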
+#define sew_8b_handler(des, op1, op2, op, op_type)                             \
+    {                                                                          \
+        uint8_t __i = 0;                                                       \
+        uint8_t __j = 0;                                                       \
+        uint8_t __m = 0;                                                       \
+        uint32_t vm = rv->V[0][__m];                                           \
+        for (uint32_t __k = 0; (rv->csr_vl - __k) >= 4;) {                     \
+            __i %= VREG_U32_COUNT;                                             \
+            assert((des + __j) < 32);                                          \
+            op_type##_LOOP(des, op1, op2, op, 3, 0xFF, __i, __j, 4, vm);       \
+            __k += 4;                                                          \
+            __i++;                                                             \
+            /* A full vector register holds (VREG_U32_COUNT * 4) 8-bit        \
+               elements; once it is filled, advance to the next register */   \
+            if (!(__k % (VREG_U32_COUNT << 2))) {                              \
+                __j++;                                                         \
+                __i = 0;                                                       \
+            }                                                                  \
+            vm >>= 4;                                                          \
+            if (!(__k % 32)) {                                                 \
+                __m++;                                                         \
+                vm = rv->V[0][__m];                                            \
+            }                                                                  \
+        }                                                                      \
+        op_type##_LOOP_LEFT(des, op1, op2, op, 3, 0xFF, __i, __j, 4, vm);      \
+    }
+
+#define sew_16b_handler(des, op1, op2, op, op_type)                         \
+    {                                                                       \
+        uint8_t __i = 0;                                                    \
+        uint8_t __j = 0;                                                    \
+        uint8_t __m = 0;                                                    \
+        uint32_t vm = rv->V[0][__m];                                        \
+        for (uint32_t __k = 0; (rv->csr_vl - __k) >= 2;) {                  \
+            __i %= VREG_U32_COUNT;                                          \
+            assert((des + __j) < 32);                                       \
+            op_type##_LOOP(des, op1, op2, op, 4, 0xFFFF, __i, __j, 2, vm);  \
+            __k += 2;                                                       \
+            __i++;                                                          \
+            if (!(__k % (VREG_U32_COUNT << 1))) {                           \
+                __j++;                                                      \
+                __i = 0;                                                    \
+            }                                                               \
+            vm >>= 2;                                                       \
+            if (!(__k % 32)) {                                              \
+                __m++;                                                      \
+                vm = rv->V[0][__m];                                         \
+            }                                                               \
+        }                                                                   \
+        op_type##_LOOP_LEFT(des, op1, op2, op, 4, 0xFFFF, __i, __j, 2, vm); \
+    }
+
+#define sew_32b_handler(des, op1, op2, op, op_type)                            \
+    {                                                                          \
+        uint8_t __i = 0;                                                       \
+        uint8_t __j = 0;                                                       \
+        uint32_t vm = rv->V[0][__i];                                           \
+        for (uint32_t __k = 0; rv->csr_vl > __k;) {                            \
+            __i %= VREG_U32_COUNT;                                             \
+            assert((des + __j) < 32);                                          \
+            op_type##_LOOP(des, op1, op2, op, 0, 0xFFFFFFFF, __i, __j, 1, vm); \
+            __k += 1;                                                          \
+            __i++;                                                             \
+            if (!(__k % (VREG_U32_COUNT))) {                                   \
+                __j++;                                                         \
+                __i = 0;                                                       \
+            }                                                                  \
+            vm >>= 1;                                                          \
+        }                                                                      \
+    }
+
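+/* Unit-stride loads: vle8/vle16/vle32 read csr_vl elements starting at
+ * x[rs1] and pack 4, 2, or 1 element(s) per 32-bit word of v[vd..]. vle64 and
+ * the segmented, whole-register, strided, and indexed load variants below are
+ * currently V_NOP stubs.
+ */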
+RVOP(
+    vle8_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl - cnt >= 4;) {
+                i %= VREG_U32_COUNT;
+                /* Accessing a vector register index larger than 31 is
+                 * illegal.
+                 */
+                assert(ir->vd + j < 32);
+                /* Process full 32-bit words */
+                rv->V[ir->vd + j][i] = 0;
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_b(rv, addr);
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_b(rv, addr + 1) << 8;
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_b(rv, addr + 2) << 16;
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_b(rv, addr + 3) << 24;
+                cnt += 4;
+                i++;
+
+                /* Move to next vector register after filling VLEN */
+                if (!(cnt % (VREG_U32_COUNT << 2))) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+            /* Clear the element slots that the tail loop below refills */
+            if (rv->csr_vl % 4) {
+                rv->V[ir->vd + j][i] &= 0xFFFFFFFF << ((rv->csr_vl % 4) << 3);
+            }
+            /* Handle the remaining elements that do not fill a whole word */
+            for (uint32_t cnt = 0; cnt < (rv->csr_vl % 4); cnt++) {
+                assert(ir->vd + j < 32); /* Illegal */
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_b(rv, addr + cnt)
+                                        << (cnt << 3);
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle16_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl - cnt >= 2;) {
+                i %= VREG_U32_COUNT;
+                assert(ir->vd + j < 32);
+                /* Process full 32-bit words */
+                rv->V[ir->vd + j][i] = 0;
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_s(rv, addr);
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_s(rv, addr + 2) << 16;
+                cnt += 2;
+                i++;
+
+                /* Move to next vector register after filling VLEN */
+                if (!(cnt % (VREG_U32_COUNT << 1))) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+            if (rv->csr_vl % 2) {
+                assert(ir->vd + j < 32); /* Illegal */
+                /* Clear the low halfword before inserting the tail element */
+                rv->V[ir->vd + j][i] &= 0xFFFF0000;
+                rv->V[ir->vd + j][i] |= rv->io.mem_read_s(rv, addr);
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle32_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl > cnt;) {
+                i %= VREG_U32_COUNT;
+                assert(ir->vd + j < 32);
+                rv->V[ir->vd + j][i] = rv->io.mem_read_w(rv, addr);
+                cnt += 1;
+                i++;
+
+                if (!(cnt % VREG_U32_COUNT)) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl1re8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl1re16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl1re32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl1re64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl2re8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl2re16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+RVOP(
+    vl2re32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl2re64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl4re8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl4re16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl4re32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl4re64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl8re8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl8re16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl8re32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vl8re64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlm_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vle64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e8ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e16ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e32ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg2e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg3e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg4e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg5e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg6e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg7e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlseg8e64ff_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg2ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg3ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg4ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg5ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg6ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg7ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg8ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg2ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg3ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg4ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg5ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg6ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg7ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg8ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg2ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg3ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg4ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg5ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg6ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg7ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg8ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg2ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg3ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg4ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg5ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg6ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg7ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vluxseg8ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlse8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlse16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlse32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlse64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg2e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg3e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg4e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg5e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg6e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg7e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg8e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg2e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg3e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg4e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg5e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg6e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg7e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg8e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg2e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg3e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg4e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg5e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg6e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg7e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg8e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg2e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg3e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg4e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg5e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg6e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg7e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vlsseg8e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg2ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg3ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg4ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg5ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg6ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg7ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg8ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg2ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg3ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg4ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg5ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg6ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg7ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg8ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg2ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg3ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg4ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg5ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg6ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg7ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg8ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg2ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg3ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg4ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg5ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg6ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg7ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vloxseg8ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
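+/* Unit-stride stores mirror the loads above: vse8/vse16/vse32 write csr_vl
+ * elements of v[vs3..] to memory starting at x[rs1]; vse64 and the segmented
+ * store variants below are currently V_NOP stubs.
+ */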
+RVOP(
+    vse8_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl - cnt >= 4;) {
+                i %= VREG_U32_COUNT;
+                /* Accessing a vector register index larger than 31 is
+                 * illegal.
+                 */
+                assert(ir->vs3 + j < 32);
+                uint32_t tmp = rv->V[ir->vs3 + j][i];
+                /* Process full 32-bit words */
+                rv->io.mem_write_b(rv, addr, (tmp) & 0xff);
+                rv->io.mem_write_b(rv, addr + 1, (tmp >> 8) & 0xff);
+                rv->io.mem_write_b(rv, addr + 2, (tmp >> 16) & 0xff);
+                rv->io.mem_write_b(rv, addr + 3, (tmp >> 24) & 0xff);
+                cnt += 4;
+                i++;
+
+                /* Move to next vector register after filling VLEN */
+                if (!(cnt % (VREG_U32_COUNT << 2))) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+            /* Handle the remaining elements that do not fill a whole word */
+            for (uint32_t cnt = 0; cnt < (rv->csr_vl % 4); cnt++) {
+                assert(ir->vs3 + j < 32); /* Illegal */
+                uint8_t tmp = (rv->V[ir->vs3 + j][i] >> (cnt << 3)) & 0xff;
+                rv->io.mem_write_b(rv, addr + cnt, tmp);
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vse16_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl - cnt >= 2;) {
+                i %= VREG_U32_COUNT;
+                assert(ir->vs3 + j < 32);
+                uint32_t tmp = rv->V[ir->vs3 + j][i];
+                /* Process full 32-bit words */
+                rv->io.mem_write_s(rv, addr, (tmp) & 0xffff);
+                rv->io.mem_write_s(rv, addr + 2, (tmp >> 16) & 0xffff);
+                cnt += 2;
+                i++;
+
+                if (!(cnt % (VREG_U32_COUNT << 1))) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+            if (rv->csr_vl % 2) {
+                rv->io.mem_write_s(rv, addr, rv->V[ir->vs3 + j][i] & 0xffff);
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
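+/* vse32.v: unit-stride store of 32-bit elements; each element occupies
+ * exactly one 32-bit word of the vector register file.
+ */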
+RVOP(
+    vse32_v,
+    {
+        uint8_t sew = 8 << ((rv->csr_vtype >> 3) & 0b111);
+        uint32_t addr = rv->X[ir->rs1];
+
+        if (ir->eew > sew) {
+            /* Illegal */
+            rv->csr_vtype = 0x80000000;
+            rv->csr_vl = 0;
+            return true;
+        } else {
+            uint8_t i = 0;
+            uint8_t j = 0;
+            for (uint32_t cnt = 0; rv->csr_vl > cnt;) {
+                i %= VREG_U32_COUNT;
+                assert(ir->vs3 + j < 32);
+                rv->io.mem_write_w(rv, addr, rv->V[ir->vs3 + j][i]);
+                cnt += 1;
+                i++;
+
+                if (!(cnt % (VREG_U32_COUNT))) {
+                    j++;
+                    i = 0;
+                }
+                addr += 4;
+            }
+        }
+    },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vse64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg2e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg3e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg4e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg5e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg6e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg7e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg8e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg2e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg3e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg4e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg5e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg6e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg7e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg8e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg2e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg3e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg4e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg5e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg6e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg7e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg8e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg2e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg3e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg4e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg5e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg6e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg7e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsseg8e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vs1r_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vs2r_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vs4r_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vs8r_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsm_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg2ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg3ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg4ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg5ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg6ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg7ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg8ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg2ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg3ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg4ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg5ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg6ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg7ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg8ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg2ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg3ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg4ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg5ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg6ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg7ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg8ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg2ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg3ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg4ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg5ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg6ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg7ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsuxseg8ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsse8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsse16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsse32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsse64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg2e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg3e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg4e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg5e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg6e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg7e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg8e8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg2e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg3e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg4e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg5e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg6e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg7e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg8e16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg2e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg3e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg4e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg5e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg6e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg7e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg8e32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg2e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg3e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg4e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg5e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg6e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg7e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssseg8e64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg2ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg3ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg4ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg5ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg6ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg7ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg8ei8_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg2ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg3ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg4ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg5ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg6ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg7ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg8ei16_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg2ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg3ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg4ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg5ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg6ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg7ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg8ei32_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg2ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg3ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg4ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg5ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg6ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg7ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsoxseg8ei64_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
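+/* Single-width integer add: vd[i] = vs2[i] + (vs1[i] | x[rs1] | imm).
+ * Each op_* macro below names the scalar operation that VECTOR_DISPATCH
+ * is expected to apply element-wise for the given operand form
+ * (VV/VX/VI).
+ */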
+#define op_add(a, b) ((a) + (b))
+RVOP(vadd_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, add, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vadd_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, add, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vadd_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, add, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_add
+
+#define op_sub(a, b) ((a) - (b))
+RVOP(vsub_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, sub, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vsub_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, sub, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_sub
+
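+/* Reverse subtract: vd[i] = operand - vs2[i] (operands swapped). */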
+#define op_rsub(a, b) ((b) - (a))
+RVOP(vrsub_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, rsub, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vrsub_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, rsub, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_rsub
+
+RVOP(
+    vminu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vminu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmin_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmin_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmaxu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmaxu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmax_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmax_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+#define op_and(a, b) ((a) & (b))
+RVOP(vand_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, and, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vand_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, and, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vand_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, and, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_and
+
+#define op_or(a, b) ((a) | (b))
+RVOP(vor_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, or, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vor_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, or, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vor_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, or, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_or
+
+#define op_xor(a, b) ((a) ^ (b))
+RVOP(vxor_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, xor, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vxor_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, xor, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vxor_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, xor, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_xor
+
+RVOP(
+    vrgather_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vrgather_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vrgather_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslideup_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslideup_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vrgatherei16_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslidedown_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslidedown_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vadc_vvm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vadc_vxm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vadc_vim,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmadc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmadc_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmadc_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsbc_vvm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsbc_vxm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsbc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsbc_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmerge_vvm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmerge_vxm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmerge_vim,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
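+/* vmv.v.*: vd[i] = src.  op_vmv masks away its first operand ((a) & 0)
+ * so the same element-wise dispatch path can be reused for a plain move.
+ */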
+#define op_vmv(a, b) (((a) & 0) + (b))
+RVOP(vmv_v_v, {VECTOR_DISPATCH(ir->vd, 0, ir->vs1, vmv, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vmv_v_x, {VECTOR_DISPATCH(ir->vd, 0, ir->rs1, vmv, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vmv_v_i, {VECTOR_DISPATCH(ir->vd, 0, ir->imm, vmv, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_vmv
+
+RVOP(
+    vmseq_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmseq_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmseq_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsne_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsne_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsne_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsltu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsltu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmslt_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmslt_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsleu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsleu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsleu_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsle_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsle_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsle_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsgtu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsgtu_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsgt_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsgt_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsaddu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsaddu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsaddu_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsadd_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsadd_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssubu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssubu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssub_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
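+/* Logical shift left: only the low log2(SEW) bits of the shift amount
+ * are used, hence the mask by (SEW - 1) with SEW = 8 << vtype.vsew.
+ */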
+#define op_sll(a, b) \
+    ((a) << ((b) & ((8 << ((rv->csr_vtype >> 3) & 0b111)) - 1)))
+RVOP(vsll_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, sll, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vsll_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, sll, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vsll_vi, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->imm, sll, VI)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_sll
+
+RVOP(
+    vsmul_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsmul_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsrl_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsrl_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsrl_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsra_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsra_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vsra_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssrl_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssrl_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssrl_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssra_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssra_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vssra_vi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsrl_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsrl_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsrl_wi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsra_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsra_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnsra_wi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclipu_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclipu_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclipu_wi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclip_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclip_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnclip_wi,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwredsumu_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwredsum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
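+/* Vector single-width integer reduction instructions */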
+RVOP(
+    vredsum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredand_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredor_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredxor_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredminu_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredmin_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredmaxu_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vredmax_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vaaddu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vaaddu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vaadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vaadd_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vasubu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vasubu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vasub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vasub_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslide1up_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vslide1down_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vcompress_vm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmandn_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmand_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmor_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmxor_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmorn_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmnand_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmnor_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmxnor_mm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vdivu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vdivu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vdiv_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vdiv_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vremu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vremu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vrem_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vrem_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmulhu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmulhu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+#define op_mul(a, b) ((a) * (b))
+RVOP(vmul_vv, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->vs1, mul, VV)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+
+RVOP(vmul_vx, {VECTOR_DISPATCH(ir->vd, ir->vs2, ir->rs1, mul, VX)}, GEN({
+         assert; /* FIXME: Implement */
+     }))
+#undef op_mul
+
+RVOP(
+    vmulhsu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmulhsu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmulh_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmulh_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmadd_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnmsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnmsub_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmacc_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnmsac_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vnmsac_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwaddu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwaddu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwadd_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsubu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsubu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsub_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwaddu_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwaddu_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwadd_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwadd_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsubu_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsubu_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsub_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwsub_wx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmulu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmulu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmulsu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmulsu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmul_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmul_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmaccu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmaccu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmacc_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmaccus_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmaccsu_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vwmaccsu_vx,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmv_s_x,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmv_x_s,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vcpop_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfirst_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsbf_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsof_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmsif_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    viota_m,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vid_v,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
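+/* Vector floating-point instructions */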
+RVOP(
+    vfadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfadd_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfredusum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsub_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfredosum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmin_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmin_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfredmin_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmax_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmax_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfredmax_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnj_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnj_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnjn_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnjn_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnjx_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfsgnjx_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfslide1up_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfslide1down_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmerge_vfm,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmv_v_f,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfeq_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfeq_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfle_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfle_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmflt_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmflt_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfne_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfne_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfgt_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vmfge_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfdiv_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfdiv_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfrdiv_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmul_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmul_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfrsub_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmadd_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmadd_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmsub_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmsub_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmacc_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmacc_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmsac_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfmsac_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmsac_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfnmsac_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwadd_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwadd_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwredusum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwsub_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwsub_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwredosum_vs,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwadd_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwadd_wf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwsub_wv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwsub_wf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmul_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmul_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmacc_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwnmacc_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwnmacc_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmsac_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwmsac_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwnmsac_vv,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+RVOP(
+    vfwnmsac_vf,
+    { V_NOP; },
+    GEN({
+        assert; /* FIXME: Implement */
+    }))
+
+#endif